ppo-PyramidRND / run_logs / timers.json
{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.3767026364803314,
"min": 0.3630126416683197,
"max": 1.5247355699539185,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 11307.1064453125,
"min": 10954.26953125,
"max": 46254.37890625,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989991.0,
"min": 29952.0,
"max": 989991.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989991.0,
"min": 29952.0,
"max": 989991.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.5573706030845642,
"min": -0.09127236157655716,
"max": 0.6314180493354797,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 159.40798950195312,
"min": -22.08791160583496,
"max": 181.84840393066406,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.022041238844394684,
"min": -0.002276202430948615,
"max": 0.17487747967243195,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 6.3037943840026855,
"min": -0.5736030340194702,
"max": 41.445960998535156,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06614015062916137,
"min": 0.06614015062916137,
"max": 0.07547961704854277,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9259621088082591,
"min": 0.471874287046543,
"max": 1.0567146386795987,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.015092360251036954,
"min": 0.000764950480987412,
"max": 0.01519628140344543,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.21129304351451736,
"min": 0.009179405771848944,
"max": 0.22794422105168147,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.559047480350001e-06,
"min": 7.559047480350001e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010582666472490001,
"min": 0.00010582666472490001,
"max": 0.0035081129306290997,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10251965,
"min": 0.10251965,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4352751000000001,
"min": 1.3691136000000002,
"max": 2.5693709,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002617130350000001,
"min": 0.0002617130350000001,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003663982490000001,
"min": 0.003663982490000001,
"max": 0.11696015291,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.00963977724313736,
"min": 0.00963977724313736,
"max": 0.2444853037595749,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.13495688140392303,
"min": 0.13495688140392303,
"max": 1.7113971710205078,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 320.6222222222222,
"min": 304.9375,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 28856.0,
"min": 15984.0,
"max": 33150.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.634922200275792,
"min": -1.0000000521540642,
"max": 1.653383310418576,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 147.14299802482128,
"min": -32.000001668930054,
"max": 158.7247978001833,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.634922200275792,
"min": -1.0000000521540642,
"max": 1.653383310418576,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 147.14299802482128,
"min": -32.000001668930054,
"max": 158.7247978001833,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.03246868614999888,
"min": 0.03246868614999888,
"max": 5.070586137473583,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.922181753499899,
"min": 2.922181753499899,
"max": 81.12937819957733,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1685326855",
"python_version": "3.10.11 (main, Apr 5 2023, 14:15:10) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1685329188"
},
"total": 2332.1673570569997,
"count": 1,
"self": 0.4888529049990211,
"children": {
"run_training.setup": {
"total": 0.037366968000242196,
"count": 1,
"self": 0.037366968000242196
},
"TrainerController.start_learning": {
"total": 2331.6411371840004,
"count": 1,
"self": 1.5655369580472325,
"children": {
"TrainerController._reset_env": {
"total": 3.7790052780001133,
"count": 1,
"self": 3.7790052780001133
},
"TrainerController.advance": {
"total": 2326.194488276952,
"count": 63953,
"self": 1.5663093799767012,
"children": {
"env_step": {
"total": 1658.1033306550098,
"count": 63953,
"self": 1537.3639244009723,
"children": {
"SubprocessEnvManager._take_step": {
"total": 119.81768000210877,
"count": 63953,
"self": 5.294507421077469,
"children": {
"TorchPolicy.evaluate": {
"total": 114.5231725810313,
"count": 62569,
"self": 114.5231725810313
}
}
},
"workers": {
"total": 0.9217262519287033,
"count": 63953,
"self": 0.0,
"children": {
"worker_root": {
"total": 2325.782831690052,
"count": 63953,
"is_parallel": true,
"self": 915.5571359539904,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0019643949999590404,
"count": 1,
"is_parallel": true,
"self": 0.0006075539999983448,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013568409999606956,
"count": 8,
"is_parallel": true,
"self": 0.0013568409999606956
}
}
},
"UnityEnvironment.step": {
"total": 0.048897694000061165,
"count": 1,
"is_parallel": true,
"self": 0.0005532549998861214,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005373049998524948,
"count": 1,
"is_parallel": true,
"self": 0.0005373049998524948
},
"communicator.exchange": {
"total": 0.045739116000277136,
"count": 1,
"is_parallel": true,
"self": 0.045739116000277136
},
"steps_from_proto": {
"total": 0.002068018000045413,
"count": 1,
"is_parallel": true,
"self": 0.0004044610000164539,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001663557000028959,
"count": 8,
"is_parallel": true,
"self": 0.001663557000028959
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1410.2256957360614,
"count": 63952,
"is_parallel": true,
"self": 32.744075600074666,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 24.96579596899801,
"count": 63952,
"is_parallel": true,
"self": 24.96579596899801
},
"communicator.exchange": {
"total": 1244.520930830016,
"count": 63952,
"is_parallel": true,
"self": 1244.520930830016
},
"steps_from_proto": {
"total": 107.99489333697284,
"count": 63952,
"is_parallel": true,
"self": 22.66314731314924,
"children": {
"_process_rank_one_or_two_observation": {
"total": 85.3317460238236,
"count": 511616,
"is_parallel": true,
"self": 85.3317460238236
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 666.5248482419656,
"count": 63953,
"self": 2.8839839530819518,
"children": {
"process_trajectory": {
"total": 120.27906880188357,
"count": 63953,
"self": 119.98947782188361,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2895909799999572,
"count": 2,
"self": 0.2895909799999572
}
}
},
"_update_policy": {
"total": 543.3617954870001,
"count": 447,
"self": 351.05407932495973,
"children": {
"TorchPPOOptimizer.update": {
"total": 192.30771616204038,
"count": 22818,
"self": 192.30771616204038
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.2100008461857215e-06,
"count": 1,
"self": 1.2100008461857215e-06
},
"TrainerController._save_models": {
"total": 0.10210546100006468,
"count": 1,
"self": 0.0013672800005224417,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10073818099954224,
"count": 1,
"self": 0.10073818099954224
}
}
}
}
}
}
}
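
For reference, below is a minimal Python sketch for inspecting this file. The script name and the relative path run_logs/timers.json are assumptions for illustration; the JSON keys it reads ("gauges", "value", "min", "max", "count", "total", "self", "children") are exactly the ones present in the file above.

# inspect_timers.py -- minimal sketch for reading an ML-Agents timers.json
# (this run used mlagents 0.31.0.dev0). The file path below is an assumption;
# adjust it to wherever the file lives in your checkout.
import json

with open("run_logs/timers.json") as f:
    root = json.load(f)

# Each gauge records the latest value plus min/max/count over the run.
for name, gauge in root["gauges"].items():
    print(f"{name}: value={gauge['value']:.4g} "
          f"(min={gauge['min']:.4g}, max={gauge['max']:.4g}, n={gauge['count']})")

# The timer section is a tree: every node has "total" (seconds, inclusive of
# children), "self" (seconds excluding children), "count", and an optional
# "children" dict. The root object itself is the top of that tree.
def walk(node, name="root", depth=0):
    print(f"{'  ' * depth}{name}: "
          f"{node.get('total', 0.0):.2f}s over {node.get('count', 0)} call(s)")
    for child_name, child in node.get("children", {}).items():
        walk(child, child_name, depth + 1)

walk(root)

On this run, the walk reproduces the nesting above and shows communicator.exchange (about 1244.5 s of the 2332.2 s total) dominating env_step, i.e. most wall time was spent waiting on the Unity executable rather than in the PyTorch update (TorchPPOOptimizer.update, about 192.3 s).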