{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.5551222562789917,
"min": 0.5490848422050476,
"max": 1.4759281873703003,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 16671.431640625,
"min": 16437.404296875,
"max": 44773.7578125,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989959.0,
"min": 29952.0,
"max": 989959.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989959.0,
"min": 29952.0,
"max": 989959.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.2164200395345688,
"min": -0.1294645369052887,
"max": 0.23111198842525482,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 55.61994934082031,
"min": -31.20095443725586,
"max": 58.70244598388672,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 1.0866844654083252,
"min": -0.17487850785255432,
"max": 1.0866844654083252,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 279.27789306640625,
"min": -44.2442626953125,
"max": 279.27789306640625,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06642980320648653,
"min": 0.06551795266399943,
"max": 0.07252054497006437,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9300172448908115,
"min": 0.49560066601214664,
"max": 1.0379111801374556,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.38045148569309534,
"min": 0.00026295054614434136,
"max": 0.38045148569309534,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 5.326320799703335,
"min": 0.002892456007587755,
"max": 5.326320799703335,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.691476007635714e-06,
"min": 7.691476007635714e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0001076806641069,
"min": 0.0001076806641069,
"max": 0.0030057385980871995,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10256379285714287,
"min": 0.10256379285714287,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4358931000000001,
"min": 1.3691136000000002,
"max": 2.3592626,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002661229064285715,
"min": 0.0002661229064285715,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003725720690000001,
"min": 0.003725720690000001,
"max": 0.10021108872000001,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.009795235469937325,
"min": 0.009795235469937325,
"max": 0.38168540596961975,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.13713330030441284,
"min": 0.13713330030441284,
"max": 2.671797752380371,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 614.804347826087,
"min": 614.804347826087,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 28281.0,
"min": 15984.0,
"max": 32792.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.080765193409246,
"min": -1.0000000521540642,
"max": 1.080765193409246,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 49.715198896825314,
"min": -32.000001668930054,
"max": 49.715198896825314,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.080765193409246,
"min": -1.0000000521540642,
"max": 1.080765193409246,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 49.715198896825314,
"min": -32.000001668930054,
"max": 49.715198896825314,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.06306468596385853,
"min": 0.06306468596385853,
"max": 8.564095130190253,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.900975554337492,
"min": 2.900975554337492,
"max": 137.02552208304405,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1673691494",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=PyramidsTraining2 --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1673693665"
},
"total": 2171.120323021,
"count": 1,
"self": 0.4861872060000678,
"children": {
"run_training.setup": {
"total": 0.12304267400031677,
"count": 1,
"self": 0.12304267400031677
},
"TrainerController.start_learning": {
"total": 2170.5110931409995,
"count": 1,
"self": 1.64530409004783,
"children": {
"TrainerController._reset_env": {
"total": 6.712525835999713,
"count": 1,
"self": 6.712525835999713
},
"TrainerController.advance": {
"total": 2162.0596962899517,
"count": 63244,
"self": 1.6645163929092632,
"children": {
"env_step": {
"total": 1450.1981623770807,
"count": 63244,
"self": 1323.1891909531696,
"children": {
"SubprocessEnvManager._take_step": {
"total": 125.96206249498755,
"count": 63244,
"self": 4.858351079989006,
"children": {
"TorchPolicy.evaluate": {
"total": 121.10371141499854,
"count": 62548,
"self": 40.51527686292184,
"children": {
"TorchPolicy.sample_actions": {
"total": 80.5884345520767,
"count": 62548,
"self": 80.5884345520767
}
}
}
}
},
"workers": {
"total": 1.0469089289235853,
"count": 63244,
"self": 0.0,
"children": {
"worker_root": {
"total": 2165.164056982972,
"count": 63244,
"is_parallel": true,
"self": 958.6815381360075,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0019716920000973914,
"count": 1,
"is_parallel": true,
"self": 0.0006842650004728057,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012874269996245857,
"count": 8,
"is_parallel": true,
"self": 0.0012874269996245857
}
}
},
"UnityEnvironment.step": {
"total": 0.0515767820002111,
"count": 1,
"is_parallel": true,
"self": 0.0005399389997364779,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004564000000755186,
"count": 1,
"is_parallel": true,
"self": 0.0004564000000755186
},
"communicator.exchange": {
"total": 0.046458824000183085,
"count": 1,
"is_parallel": true,
"self": 0.046458824000183085
},
"steps_from_proto": {
"total": 0.004121619000216015,
"count": 1,
"is_parallel": true,
"self": 0.000590620999901148,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0035309980003148667,
"count": 8,
"is_parallel": true,
"self": 0.0035309980003148667
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1206.4825188469645,
"count": 63243,
"is_parallel": true,
"self": 31.29584732699641,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 23.96892895300016,
"count": 63243,
"is_parallel": true,
"self": 23.96892895300016
},
"communicator.exchange": {
"total": 1040.7340392059841,
"count": 63243,
"is_parallel": true,
"self": 1040.7340392059841
},
"steps_from_proto": {
"total": 110.48370336098378,
"count": 63243,
"is_parallel": true,
"self": 25.41156996181553,
"children": {
"_process_rank_one_or_two_observation": {
"total": 85.07213339916825,
"count": 505944,
"is_parallel": true,
"self": 85.07213339916825
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 710.1970175199617,
"count": 63244,
"self": 2.963622484923235,
"children": {
"process_trajectory": {
"total": 154.54575871003726,
"count": 63244,
"self": 154.33562215503753,
"children": {
"RLTrainer._checkpoint": {
"total": 0.21013655499973538,
"count": 2,
"self": 0.21013655499973538
}
}
},
"_update_policy": {
"total": 552.6876363250012,
"count": 434,
"self": 210.7421391739772,
"children": {
"TorchPPOOptimizer.update": {
"total": 341.94549715102403,
"count": 22836,
"self": 341.94549715102403
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.209998097503558e-07,
"count": 1,
"self": 9.209998097503558e-07
},
"TrainerController._save_models": {
"total": 0.09356600400042225,
"count": 1,
"self": 0.0015587990001222352,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09200720500030002,
"count": 1,
"self": 0.09200720500030002
}
}
}
}
}
}
}