ppo-Pyramids / run_logs / timers.json
{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.8369749188423157,
"min": 0.8090046644210815,
"max": 1.4497430324554443,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 25470.8203125,
"min": 24231.30859375,
"max": 43979.40625,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989990.0,
"min": 29952.0,
"max": 989990.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989990.0,
"min": 29952.0,
"max": 989990.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.2350318729877472,
"min": -0.11264254152774811,
"max": 0.2464633584022522,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 59.933128356933594,
"min": -27.25949478149414,
"max": 63.587547302246094,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.04709357023239136,
"min": -0.0014552576467394829,
"max": 0.3850932717323303,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 12.00886058807373,
"min": -0.36526966094970703,
"max": 91.26710510253906,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07090225976555505,
"min": 0.06392303770518906,
"max": 0.07303618280473968,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.0635338964833256,
"min": 0.4781546034587911,
"max": 1.0635338964833256,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.011282450104004561,
"min": 0.00013776707802311722,
"max": 0.014766214985554272,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.16923675156006843,
"min": 0.0019287390923236412,
"max": 0.2067270097977598,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.440157519979997e-06,
"min": 7.440157519979997e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00011160236279969996,
"min": 0.00011160236279969996,
"max": 0.003508919630360199,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10248002000000002,
"min": 0.10248002000000002,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.5372003000000003,
"min": 1.3886848,
"max": 2.5696398000000005,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00025775399799999986,
"min": 0.00025775399799999986,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003866309969999998,
"min": 0.003866309969999998,
"max": 0.11698701602,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.010654088109731674,
"min": 0.010488994419574738,
"max": 0.35650160908699036,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.15981131792068481,
"min": 0.14684592187404633,
"max": 2.495511293411255,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 572.469387755102,
"min": 572.469387755102,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 28051.0,
"min": 15984.0,
"max": 32739.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.1009183437848578,
"min": -1.0000000521540642,
"max": 1.1009183437848578,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 53.94499884545803,
"min": -31.99440163373947,
"max": 57.66659864783287,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.1009183437848578,
"min": -1.0000000521540642,
"max": 1.1009183437848578,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 53.94499884545803,
"min": -31.99440163373947,
"max": 57.66659864783287,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.06304745051572669,
"min": 0.06304745051572669,
"max": 7.048223110847175,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.089325075270608,
"min": 3.089325075270608,
"max": 112.7715697735548,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1681395906",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1681397862"
},
"total": 1955.6913732950002,
"count": 1,
"self": 0.43588859700003013,
"children": {
"run_training.setup": {
"total": 0.11331390299983468,
"count": 1,
"self": 0.11331390299983468
},
"TrainerController.start_learning": {
"total": 1955.1421707950003,
"count": 1,
"self": 1.3933385219957017,
"children": {
"TrainerController._reset_env": {
"total": 4.01655364699991,
"count": 1,
"self": 4.01655364699991
},
"TrainerController.advance": {
"total": 1949.6285327700052,
"count": 63280,
"self": 1.4412048679600957,
"children": {
"env_step": {
"total": 1351.2243465010806,
"count": 63280,
"self": 1245.4393796631048,
"children": {
"SubprocessEnvManager._take_step": {
"total": 104.97075020602324,
"count": 63280,
"self": 4.7408724349234035,
"children": {
"TorchPolicy.evaluate": {
"total": 100.22987777109984,
"count": 62561,
"self": 100.22987777109984
}
}
},
"workers": {
"total": 0.8142166319526041,
"count": 63280,
"self": 0.0,
"children": {
"worker_root": {
"total": 1950.4691251900576,
"count": 63280,
"is_parallel": true,
"self": 812.231106169088,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.001752971999849251,
"count": 1,
"is_parallel": true,
"self": 0.0005749389993070508,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0011780330005422002,
"count": 8,
"is_parallel": true,
"self": 0.0011780330005422002
}
}
},
"UnityEnvironment.step": {
"total": 0.09923613900036798,
"count": 1,
"is_parallel": true,
"self": 0.0005393370006459008,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004712869999821123,
"count": 1,
"is_parallel": true,
"self": 0.0004712869999821123
},
"communicator.exchange": {
"total": 0.09662423300005685,
"count": 1,
"is_parallel": true,
"self": 0.09662423300005685
},
"steps_from_proto": {
"total": 0.001601281999683124,
"count": 1,
"is_parallel": true,
"self": 0.00044214599938641186,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0011591360002967122,
"count": 8,
"is_parallel": true,
"self": 0.0011591360002967122
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1138.2380190209697,
"count": 63279,
"is_parallel": true,
"self": 31.67930967097709,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 23.33917068711844,
"count": 63279,
"is_parallel": true,
"self": 23.33917068711844
},
"communicator.exchange": {
"total": 989.5725778649025,
"count": 63279,
"is_parallel": true,
"self": 989.5725778649025
},
"steps_from_proto": {
"total": 93.64696079797159,
"count": 63279,
"is_parallel": true,
"self": 20.1513044739404,
"children": {
"_process_rank_one_or_two_observation": {
"total": 73.49565632403119,
"count": 506232,
"is_parallel": true,
"self": 73.49565632403119
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 596.9629814009645,
"count": 63280,
"self": 2.604591855862054,
"children": {
"process_trajectory": {
"total": 103.71332395409945,
"count": 63280,
"self": 103.45949447109933,
"children": {
"RLTrainer._checkpoint": {
"total": 0.25382948300011776,
"count": 2,
"self": 0.25382948300011776
}
}
},
"_update_policy": {
"total": 490.645065591003,
"count": 448,
"self": 313.1579538139031,
"children": {
"TorchPPOOptimizer.update": {
"total": 177.48711177709993,
"count": 22809,
"self": 177.48711177709993
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0349995136493817e-06,
"count": 1,
"self": 1.0349995136493817e-06
},
"TrainerController._save_models": {
"total": 0.10374482099996385,
"count": 1,
"self": 0.001430348000212689,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10231447299975116,
"count": 1,
"self": 0.10231447299975116
}
}
}
}
}
}
}
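
For reference, a minimal Python sketch for inspecting a timers.json like the one above: it prints each gauge's summary statistics, then walks the nested "children" timer tree to show where wall-clock time went. The file path is an assumption (adjust it to wherever the log lives), and the percentage breakdown is illustrative, not part of the ML-Agents output.

import json

# Load the timer log produced by an ML-Agents training run.
# The path below is an assumption; point it at your own copy.
with open("run_logs/timers.json") as f:
    root = json.load(f)

# Each gauge records the running value plus min/max over `count` updates.
for name, gauge in root["gauges"].items():
    print(f"{name}: value={gauge['value']:.4f} "
          f"(min={gauge['min']:.4f}, max={gauge['max']:.4f}, n={gauge['count']})")

# The timer tree nests 'children' blocks; report each node's total seconds
# as a share of the root total. Note that entries marked "is_parallel"
# (e.g. worker_root) run on worker processes, so their totals overlap
# with, rather than add to, the main thread's time.
def walk(node, name="root", depth=0, run_total=None):
    t = node.get("total", 0.0)
    if run_total is None:
        run_total = t  # the root's total is the whole run
    print(f"{'  ' * depth}{name}: {t:.1f}s ({100 * t / run_total:.1f}%)")
    for child_name, child in node.get("children", {}).items():
        walk(child, child_name, depth + 1, run_total)

walk(root)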