{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.15441568195819855,
"min": 0.1516423374414444,
"max": 1.4597859382629395,
"count": 100
},
"Pyramids.Policy.Entropy.sum": {
"value": 4615.17578125,
"min": 4510.44970703125,
"max": 44284.06640625,
"count": 100
},
"Pyramids.Step.mean": {
"value": 2999936.0,
"min": 29952.0,
"max": 2999936.0,
"count": 100
},
"Pyramids.Step.sum": {
"value": 2999936.0,
"min": 29952.0,
"max": 2999936.0,
"count": 100
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.810049831867218,
"min": -0.0967884212732315,
"max": 0.8921647071838379,
"count": 100
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 243.01495361328125,
"min": -23.22922134399414,
"max": 279.24755859375,
"count": 100
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.0036305098328739405,
"min": -0.004926491994410753,
"max": 0.2608664631843567,
"count": 100
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 1.0891529321670532,
"min": -1.364638328552246,
"max": 62.86882019042969,
"count": 100
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06602856243262069,
"min": 0.06382281496328124,
"max": 0.07551365262668655,
"count": 100
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9243998740566896,
"min": 0.48872275700596635,
"max": 1.0881629512296058,
"count": 100
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.015093781847445783,
"min": 0.0010006653339106005,
"max": 0.016883016278416492,
"count": 100
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.21131294586424096,
"min": 0.012474754546420523,
"max": 0.24540752609484082,
"count": 100
},
"Pyramids.Policy.LearningRate.mean": {
"value": 1.5735780469357159e-06,
"min": 1.5735780469357159e-06,
"max": 0.00029838354339596195,
"count": 100
},
"Pyramids.Policy.LearningRate.sum": {
"value": 2.203009265710002e-05,
"min": 2.203009265710002e-05,
"max": 0.004011187762937432,
"count": 100
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10052449285714286,
"min": 0.10052449285714286,
"max": 0.19946118095238097,
"count": 100
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4073429000000002,
"min": 1.3962282666666668,
"max": 2.782402966666667,
"count": 100
},
"Pyramids.Policy.Beta.mean": {
"value": 6.239683642857148e-05,
"min": 6.239683642857148e-05,
"max": 0.009946171977142856,
"count": 100
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0008735557100000008,
"min": 0.0008735557100000008,
"max": 0.13371255041,
"count": 100
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.005970523227006197,
"min": 0.00584232434630394,
"max": 0.3981624245643616,
"count": 100
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.08358732610940933,
"min": 0.08179254084825516,
"max": 2.787137031555176,
"count": 100
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 218.41007194244605,
"min": 197.1744966442953,
"max": 999.0,
"count": 100
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30359.0,
"min": 15984.0,
"max": 33339.0,
"count": 100
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.7680842665689331,
"min": -1.0000000521540642,
"max": 1.800881930316488,
"count": 100
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 247.53179731965065,
"min": -30.533001720905304,
"max": 266.6205985993147,
"count": 100
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.7680842665689331,
"min": -1.0000000521540642,
"max": 1.800881930316488,
"count": 100
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 247.53179731965065,
"min": -30.533001720905304,
"max": 266.6205985993147,
"count": 100
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.013451450551240538,
"min": 0.012498853892541066,
"max": 8.155730509199202,
"count": 100
},
"Pyramids.Policy.RndReward.sum": {
"value": 1.8832030771736754,
"min": 1.7780195862032997,
"max": 130.49168814718723,
"count": 100
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 100
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 100
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1657032050",
"python_version": "3.7.13 (default, Apr 24 2022, 01:04:09) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./trained-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics --resume",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1657039575"
},
"total": 7524.723942149999,
"count": 1,
"self": 0.4949023749977641,
"children": {
"run_training.setup": {
"total": 0.05055900399997881,
"count": 1,
"self": 0.05055900399997881
},
"TrainerController.start_learning": {
"total": 7524.178480771001,
"count": 1,
"self": 5.81862363124219,
"children": {
"TrainerController._reset_env": {
"total": 6.720530511999868,
"count": 1,
"self": 6.720530511999868
},
"TrainerController.advance": {
"total": 7511.537237164757,
"count": 195594,
"self": 5.96156215357496,
"children": {
"env_step": {
"total": 5190.638659692994,
"count": 195594,
"self": 4799.13823292296,
"children": {
"SubprocessEnvManager._take_step": {
"total": 388.4081261530339,
"count": 195594,
"self": 16.542730059132964,
"children": {
"TorchPolicy.evaluate": {
"total": 371.8653960939009,
"count": 187564,
"self": 124.95421742581198,
"children": {
"TorchPolicy.sample_actions": {
"total": 246.91117866808895,
"count": 187564,
"self": 246.91117866808895
}
}
}
}
},
"workers": {
"total": 3.092300617000319,
"count": 195594,
"self": 0.0,
"children": {
"worker_root": {
"total": 7510.311148789989,
"count": 195594,
"is_parallel": true,
"self": 3085.4773840739563,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002262036000047374,
"count": 1,
"is_parallel": true,
"self": 0.0008680089999870688,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001394027000060305,
"count": 8,
"is_parallel": true,
"self": 0.001394027000060305
}
}
},
"UnityEnvironment.step": {
"total": 0.0499415350000163,
"count": 1,
"is_parallel": true,
"self": 0.0005878990000383055,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005298650000895577,
"count": 1,
"is_parallel": true,
"self": 0.0005298650000895577
},
"communicator.exchange": {
"total": 0.0470095579998997,
"count": 1,
"is_parallel": true,
"self": 0.0470095579998997
},
"steps_from_proto": {
"total": 0.0018142129999887402,
"count": 1,
"is_parallel": true,
"self": 0.0005035939998379035,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013106190001508367,
"count": 8,
"is_parallel": true,
"self": 0.0013106190001508367
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 4424.833764716032,
"count": 195593,
"is_parallel": true,
"self": 94.69989812376025,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 80.96354800097606,
"count": 195593,
"is_parallel": true,
"self": 80.96354800097606
},
"communicator.exchange": {
"total": 3922.6214471632575,
"count": 195593,
"is_parallel": true,
"self": 3922.6214471632575
},
"steps_from_proto": {
"total": 326.54887142803886,
"count": 195593,
"is_parallel": true,
"self": 84.68048477549723,
"children": {
"_process_rank_one_or_two_observation": {
"total": 241.86838665254163,
"count": 1564744,
"is_parallel": true,
"self": 241.86838665254163
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 2314.937015318188,
"count": 195594,
"self": 10.571558129382538,
"children": {
"process_trajectory": {
"total": 560.1876258258528,
"count": 195594,
"self": 559.5682423958508,
"children": {
"RLTrainer._checkpoint": {
"total": 0.6193834300020171,
"count": 6,
"self": 0.6193834300020171
}
}
},
"_update_policy": {
"total": 1744.1778313629525,
"count": 1397,
"self": 686.6623635858598,
"children": {
"TorchPPOOptimizer.update": {
"total": 1057.5154677770927,
"count": 68352,
"self": 1057.5154677770927
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.396001607645303e-06,
"count": 1,
"self": 1.396001607645303e-06
},
"TrainerController._save_models": {
"total": 0.10208806700029527,
"count": 1,
"self": 0.0019394499995541992,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10014861700074107,
"count": 1,
"self": 0.10014861700074107
}
}
}
}
}
}
}