{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.4181099534034729,
"min": 0.4181099534034729,
"max": 1.5149829387664795,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 12536.6083984375,
"min": 12536.6083984375,
"max": 45958.5234375,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989978.0,
"min": 29952.0,
"max": 989978.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989978.0,
"min": 29952.0,
"max": 989978.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.2663339376449585,
"min": -0.10260331630706787,
"max": 0.44559338688850403,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 68.71415710449219,
"min": -24.727399826049805,
"max": 117.63665771484375,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.09756793826818466,
"min": -0.03498930111527443,
"max": 0.26096993684768677,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 25.172527313232422,
"min": -8.922271728515625,
"max": 61.849876403808594,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06844425049032599,
"min": 0.06494964779917468,
"max": 0.07402353658558748,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9582195068645638,
"min": 0.4892747904155297,
"max": 1.0658862349228,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.011957374548703218,
"min": 0.0005443592410085995,
"max": 0.013725443249667802,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.16740324368184506,
"min": 0.007076670133111793,
"max": 0.19215620549534923,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.282619001064289e-06,
"min": 7.282619001064289e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010195666601490005,
"min": 0.00010195666601490005,
"max": 0.0036331204889599004,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10242750714285716,
"min": 0.10242750714285716,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4339851000000001,
"min": 1.3886848,
"max": 2.6110401000000003,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00025250796357142866,
"min": 0.00025250796357142866,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0035351114900000016,
"min": 0.0035351114900000016,
"max": 0.12112290599,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.011283555068075657,
"min": 0.011283555068075657,
"max": 0.3951511085033417,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.15796977281570435,
"min": 0.15796977281570435,
"max": 2.7660577297210693,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 545.3518518518518,
"min": 453.40909090909093,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29449.0,
"min": 15984.0,
"max": 33123.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.1611962018991417,
"min": -1.0000000521540642,
"max": 1.3344181463348144,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 61.54339870065451,
"min": -30.74000172317028,
"max": 88.07159765809774,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.1611962018991417,
"min": -1.0000000521540642,
"max": 1.3344181463348144,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 61.54339870065451,
"min": -30.74000172317028,
"max": 88.07159765809774,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.06319271496119294,
"min": 0.05841310634842199,
"max": 7.6876135896891356,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.349213892943226,
"min": 3.349213892943226,
"max": 123.00181743502617,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1682495965",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids_Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1682498243"
},
"total": 2277.742449114,
"count": 1,
"self": 0.4861601210000117,
"children": {
"run_training.setup": {
"total": 0.17232554499997832,
"count": 1,
"self": 0.17232554499997832
},
"TrainerController.start_learning": {
"total": 2277.083963448,
"count": 1,
"self": 1.6843214539667315,
"children": {
"TrainerController._reset_env": {
"total": 4.744244991999949,
"count": 1,
"self": 4.744244991999949
},
"TrainerController.advance": {
"total": 2270.561652773033,
"count": 63488,
"self": 1.7480010220333497,
"children": {
"env_step": {
"total": 1623.4389054400071,
"count": 63488,
"self": 1501.0141843339209,
"children": {
"SubprocessEnvManager._take_step": {
"total": 121.41560811105194,
"count": 63488,
"self": 5.30598055306541,
"children": {
"TorchPolicy.evaluate": {
"total": 116.10962755798653,
"count": 62557,
"self": 116.10962755798653
}
}
},
"workers": {
"total": 1.009112995034343,
"count": 63488,
"self": 0.0,
"children": {
"worker_root": {
"total": 2271.046936921068,
"count": 63488,
"is_parallel": true,
"self": 898.4567409451063,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0059565390000102525,
"count": 1,
"is_parallel": true,
"self": 0.003807540999957837,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0021489980000524156,
"count": 8,
"is_parallel": true,
"self": 0.0021489980000524156
}
}
},
"UnityEnvironment.step": {
"total": 0.04832424500000343,
"count": 1,
"is_parallel": true,
"self": 0.0005474150000281952,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005200980000381605,
"count": 1,
"is_parallel": true,
"self": 0.0005200980000381605
},
"communicator.exchange": {
"total": 0.0455827809999505,
"count": 1,
"is_parallel": true,
"self": 0.0455827809999505
},
"steps_from_proto": {
"total": 0.0016739509999865732,
"count": 1,
"is_parallel": true,
"self": 0.0003718760000879229,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013020749998986503,
"count": 8,
"is_parallel": true,
"self": 0.0013020749998986503
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1372.590195975962,
"count": 63487,
"is_parallel": true,
"self": 34.622340077979516,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 25.677291746017204,
"count": 63487,
"is_parallel": true,
"self": 25.677291746017204
},
"communicator.exchange": {
"total": 1208.0918588880236,
"count": 63487,
"is_parallel": true,
"self": 1208.0918588880236
},
"steps_from_proto": {
"total": 104.19870526394175,
"count": 63487,
"is_parallel": true,
"self": 23.06646933482989,
"children": {
"_process_rank_one_or_two_observation": {
"total": 81.13223592911186,
"count": 507896,
"is_parallel": true,
"self": 81.13223592911186
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 645.3747463109926,
"count": 63488,
"self": 3.0282433300051252,
"children": {
"process_trajectory": {
"total": 115.83071146098501,
"count": 63488,
"self": 115.62044697098491,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2102644900000996,
"count": 2,
"self": 0.2102644900000996
}
}
},
"_update_policy": {
"total": 526.5157915200024,
"count": 451,
"self": 338.41808778798895,
"children": {
"TorchPPOOptimizer.update": {
"total": 188.09770373201343,
"count": 22803,
"self": 188.09770373201343
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.619998309062794e-07,
"count": 1,
"self": 9.619998309062794e-07
},
"TrainerController._save_models": {
"total": 0.09374326700026359,
"count": 1,
"self": 0.001396547000240389,
"children": {
"RLTrainer._checkpoint": {
"total": 0.0923467200000232,
"count": 1,
"self": 0.0923467200000232
}
}
}
}
}
}
}