{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.775467574596405,
"min": 0.775467574596405,
"max": 1.4535527229309082,
"count": 16
},
"Pyramids.Policy.Entropy.sum": {
"value": 23400.509765625,
"min": 23400.509765625,
"max": 44094.9765625,
"count": 16
},
"Pyramids.Step.mean": {
"value": 479957.0,
"min": 29975.0,
"max": 479957.0,
"count": 16
},
"Pyramids.Step.sum": {
"value": 479957.0,
"min": 29975.0,
"max": 479957.0,
"count": 16
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.325147807598114,
"min": -0.10404781997203827,
"max": 0.37916868925094604,
"count": 16
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 85.51387023925781,
"min": -24.763381958007812,
"max": 100.85887145996094,
"count": 16
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.10621927678585052,
"min": -0.21033114194869995,
"max": 0.22147221863269806,
"count": 16
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 27.9356689453125,
"min": -55.948081970214844,
"max": 53.596275329589844,
"count": 16
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06863481113261607,
"min": 0.06468206482739852,
"max": 0.07105041485679589,
"count": 16
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.960887355856625,
"min": 0.4823158504454761,
"max": 1.0401752325803197,
"count": 16
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.01161268330475696,
"min": 0.0014182857658531186,
"max": 0.021181143804849353,
"count": 16
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.16257756626659745,
"min": 0.014182857658531186,
"max": 0.3177171570727403,
"count": 16
},
"Pyramids.Policy.LearningRate.mean": {
"value": 2.052703601482857e-05,
"min": 2.052703601482857e-05,
"max": 0.0002904748317464857,
"count": 16
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00028737850420759996,
"min": 0.00028737850420759996,
"max": 0.0030659805780066,
"count": 16
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10684231428571427,
"min": 0.10684231428571427,
"max": 0.1968249428571429,
"count": 16
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4957923999999998,
"min": 1.3777746000000002,
"max": 2.4219934,
"count": 16
},
"Pyramids.Policy.Beta.mean": {
"value": 0.000693547197142857,
"min": 0.000693547197142857,
"max": 0.009682811791428571,
"count": 16
},
"Pyramids.Policy.Beta.sum": {
"value": 0.009709660759999999,
"min": 0.009709660759999999,
"max": 0.10223714065999999,
"count": 16
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.01488067489117384,
"min": 0.014855831861495972,
"max": 0.43215659260749817,
"count": 16
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.2083294540643692,
"min": 0.2083294540643692,
"max": 3.0250961780548096,
"count": 16
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 549.8360655737705,
"min": 484.8474576271187,
"max": 993.5,
"count": 16
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 33540.0,
"min": 16518.0,
"max": 33540.0,
"count": 16
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.0825199653704962,
"min": -0.9277467176318168,
"max": 1.3248866386711597,
"count": 16
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 64.95119792222977,
"min": -29.345001719892025,
"max": 79.49319832026958,
"count": 16
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.0825199653704962,
"min": -0.9277467176318168,
"max": 1.3248866386711597,
"count": 16
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 64.95119792222977,
"min": -29.345001719892025,
"max": 79.49319832026958,
"count": 16
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.08535324649274116,
"min": 0.07551089621354211,
"max": 8.468867305885343,
"count": 16
},
"Pyramids.Policy.RndReward.sum": {
"value": 5.12119478956447,
"min": 4.455142876598984,
"max": 143.97074420005083,
"count": 16
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 16
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 16
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1657556153",
"python_version": "3.7.13 (default, Apr 24 2022, 01:04:09) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./trained-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1657557164"
},
"total": 1010.4290942160001,
"count": 1,
"self": 0.4359122689998003,
"children": {
"run_training.setup": {
"total": 0.04097649600021214,
"count": 1,
"self": 0.04097649600021214
},
"TrainerController.start_learning": {
"total": 1009.952205451,
"count": 1,
"self": 0.6580836910122798,
"children": {
"TrainerController._reset_env": {
"total": 10.072324406999996,
"count": 1,
"self": 10.072324406999996
},
"TrainerController.advance": {
"total": 999.1261248929877,
"count": 31773,
"self": 0.6877285939872309,
"children": {
"env_step": {
"total": 648.0082168149713,
"count": 31773,
"self": 596.1937606409804,
"children": {
"SubprocessEnvManager._take_step": {
"total": 51.46355741701018,
"count": 31773,
"self": 2.246374353032479,
"children": {
"TorchPolicy.evaluate": {
"total": 49.2171830639777,
"count": 31299,
"self": 17.113333896963923,
"children": {
"TorchPolicy.sample_actions": {
"total": 32.10384916701378,
"count": 31299,
"self": 32.10384916701378
}
}
}
}
},
"workers": {
"total": 0.35089875698076867,
"count": 31773,
"self": 0.0,
"children": {
"worker_root": {
"total": 1007.9281917560099,
"count": 31773,
"is_parallel": true,
"self": 460.7510374319986,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.008449707999943712,
"count": 1,
"is_parallel": true,
"self": 0.0053065460001562315,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0031431619997874805,
"count": 8,
"is_parallel": true,
"self": 0.0031431619997874805
}
}
},
"UnityEnvironment.step": {
"total": 0.047349387999929604,
"count": 1,
"is_parallel": true,
"self": 0.0005596359999344713,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00044537999997373845,
"count": 1,
"is_parallel": true,
"self": 0.00044537999997373845
},
"communicator.exchange": {
"total": 0.04448806699997476,
"count": 1,
"is_parallel": true,
"self": 0.04448806699997476
},
"steps_from_proto": {
"total": 0.001856305000046632,
"count": 1,
"is_parallel": true,
"self": 0.0005825400003232062,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012737649997234257,
"count": 8,
"is_parallel": true,
"self": 0.0012737649997234257
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 547.1771543240113,
"count": 31772,
"is_parallel": true,
"self": 13.797195006033462,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 11.369438549985489,
"count": 31772,
"is_parallel": true,
"self": 11.369438549985489
},
"communicator.exchange": {
"total": 476.31782719302896,
"count": 31772,
"is_parallel": true,
"self": 476.31782719302896
},
"steps_from_proto": {
"total": 45.692693574963414,
"count": 31772,
"is_parallel": true,
"self": 11.238211301847741,
"children": {
"_process_rank_one_or_two_observation": {
"total": 34.45448227311567,
"count": 254176,
"is_parallel": true,
"self": 34.45448227311567
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 350.43017948402917,
"count": 31773,
"self": 1.172426093031845,
"children": {
"process_trajectory": {
"total": 79.70617045799895,
"count": 31773,
"self": 79.60031695199882,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10585350600013044,
"count": 1,
"self": 0.10585350600013044
}
}
},
"_update_policy": {
"total": 269.5515829329984,
"count": 219,
"self": 105.58953385600375,
"children": {
"TorchPPOOptimizer.update": {
"total": 163.96204907699462,
"count": 11406,
"self": 163.96204907699462
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.1260003702773247e-06,
"count": 1,
"self": 1.1260003702773247e-06
},
"TrainerController._save_models": {
"total": 0.09567133399968952,
"count": 1,
"self": 0.0016238129996963835,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09404752099999314,
"count": 1,
"self": 0.09404752099999314
}
}
}
}
}
}
}