{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.4373374283313751,
"min": 0.4373374283313751,
"max": 1.3828552961349487,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 13246.076171875,
"min": 13246.076171875,
"max": 41950.296875,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989883.0,
"min": 29999.0,
"max": 989883.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989883.0,
"min": 29999.0,
"max": 989883.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.4891029894351959,
"min": -0.09020061045885086,
"max": 0.6365716457366943,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 130.1013946533203,
"min": -21.828548431396484,
"max": 183.3326416015625,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.048941098153591156,
"min": -0.048941098153591156,
"max": 0.37877002358436584,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -13.018332481384277,
"min": -13.018332481384277,
"max": 90.14726257324219,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06822469798914556,
"min": 0.06464895404449233,
"max": 0.07416838001158864,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9551457718480378,
"min": 0.5869756277689735,
"max": 1.0502334930968251,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.014525838625905592,
"min": 0.0011728096766552994,
"max": 0.016486373742823378,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.2033617407626783,
"min": 0.01641933547317419,
"max": 0.23160827474202964,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.481861791792858e-06,
"min": 7.481861791792858e-06,
"max": 0.00029476297674567496,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0001047460650851,
"min": 0.0001047460650851,
"max": 0.0036343006885665,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10249392142857146,
"min": 0.10249392142857146,
"max": 0.198254325,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4349149000000003,
"min": 1.4349149000000003,
"max": 2.6114335,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002591427507142858,
"min": 0.0002591427507142858,
"max": 0.009825607067499999,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003627998510000001,
"min": 0.003627998510000001,
"max": 0.12116220665,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.008063855580985546,
"min": 0.008063855580985546,
"max": 0.406576007604599,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.1128939762711525,
"min": 0.1128939762711525,
"max": 3.252608060836792,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 397.1333333333333,
"min": 294.7113402061856,
"max": 988.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29785.0,
"min": 17437.0,
"max": 32910.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.3894666435321172,
"min": -0.8614437992218882,
"max": 1.6561379090979182,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 104.20999826490879,
"min": -27.566201575100422,
"max": 160.54179845750332,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.3894666435321172,
"min": -0.8614437992218882,
"max": 1.6561379090979182,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 104.20999826490879,
"min": -27.566201575100422,
"max": 160.54179845750332,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.03451023576237882,
"min": 0.027286063975949144,
"max": 7.722050276895364,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.5882676821784116,
"min": 2.527639201070997,
"max": 138.99690498411655,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1674034331",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1674036519"
},
"total": 2188.7614649880006,
"count": 1,
"self": 0.4432769940003709,
"children": {
"run_training.setup": {
"total": 0.12702667200005635,
"count": 1,
"self": 0.12702667200005635
},
"TrainerController.start_learning": {
"total": 2188.191161322,
"count": 1,
"self": 1.202378460966429,
"children": {
"TrainerController._reset_env": {
"total": 6.287917343000117,
"count": 1,
"self": 6.287917343000117
},
"TrainerController.advance": {
"total": 2180.615527025033,
"count": 63910,
"self": 1.2253711531620866,
"children": {
"env_step": {
"total": 1508.8387061199205,
"count": 63910,
"self": 1407.0341820169178,
"children": {
"SubprocessEnvManager._take_step": {
"total": 101.0686858690151,
"count": 63910,
"self": 4.217618594031137,
"children": {
"TorchPolicy.evaluate": {
"total": 96.85106727498396,
"count": 62561,
"self": 32.782131168993146,
"children": {
"TorchPolicy.sample_actions": {
"total": 64.06893610599082,
"count": 62561,
"self": 64.06893610599082
}
}
}
}
},
"workers": {
"total": 0.7358382339875789,
"count": 63910,
"self": 0.0,
"children": {
"worker_root": {
"total": 2183.722967143883,
"count": 63910,
"is_parallel": true,
"self": 873.2771783049241,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0018528010000409267,
"count": 1,
"is_parallel": true,
"self": 0.0006990250001308596,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0011537759999100672,
"count": 8,
"is_parallel": true,
"self": 0.0011537759999100672
}
}
},
"UnityEnvironment.step": {
"total": 0.046326847000045746,
"count": 1,
"is_parallel": true,
"self": 0.00050268099994355,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004745530000036524,
"count": 1,
"is_parallel": true,
"self": 0.0004745530000036524
},
"communicator.exchange": {
"total": 0.043726396000010936,
"count": 1,
"is_parallel": true,
"self": 0.043726396000010936
},
"steps_from_proto": {
"total": 0.0016232170000876067,
"count": 1,
"is_parallel": true,
"self": 0.0003801480004312907,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001243068999656316,
"count": 8,
"is_parallel": true,
"self": 0.001243068999656316
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1310.445788838959,
"count": 63909,
"is_parallel": true,
"self": 28.151701304915377,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 22.246336114978703,
"count": 63909,
"is_parallel": true,
"self": 22.246336114978703
},
"communicator.exchange": {
"total": 1161.9887209290075,
"count": 63909,
"is_parallel": true,
"self": 1161.9887209290075
},
"steps_from_proto": {
"total": 98.05903049005747,
"count": 63909,
"is_parallel": true,
"self": 21.388863524164208,
"children": {
"_process_rank_one_or_two_observation": {
"total": 76.67016696589326,
"count": 511272,
"is_parallel": true,
"self": 76.67016696589326
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 670.5514497519505,
"count": 63910,
"self": 2.2265912289567495,
"children": {
"process_trajectory": {
"total": 147.30195031299968,
"count": 63910,
"self": 147.11662121099903,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1853291020006509,
"count": 2,
"self": 0.1853291020006509
}
}
},
"_update_policy": {
"total": 521.0229082099941,
"count": 459,
"self": 198.27613544599762,
"children": {
"TorchPPOOptimizer.update": {
"total": 322.74677276399643,
"count": 22734,
"self": 322.74677276399643
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.190007403958589e-07,
"count": 1,
"self": 9.190007403958589e-07
},
"TrainerController._save_models": {
"total": 0.08533757399982278,
"count": 1,
"self": 0.0014087740000832127,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08392879999973957,
"count": 1,
"self": 0.08392879999973957
}
}
}
}
}
}
}