{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.4537639319896698,
"min": 0.4537639319896698,
"max": 1.3673372268676758,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 13714.5615234375,
"min": 13714.5615234375,
"max": 41479.54296875,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989994.0,
"min": 29952.0,
"max": 989994.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989994.0,
"min": 29952.0,
"max": 989994.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.5976657867431641,
"min": -0.10791371017694473,
"max": 0.6335031986236572,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 167.944091796875,
"min": -25.899290084838867,
"max": 179.28140258789062,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.015805231407284737,
"min": -0.015805231407284737,
"max": 0.3443756103515625,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -4.441269874572754,
"min": -4.441269874572754,
"max": 82.650146484375,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.0673594766662323,
"min": 0.0651072022031128,
"max": 0.072715357330315,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9430326733272523,
"min": 0.506749048312402,
"max": 1.0702600376292442,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.014565963086733108,
"min": 0.0001693481555035095,
"max": 0.015970475410492128,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.2039234832142635,
"min": 0.0017263884739837055,
"max": 0.22358665574688977,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.61789031787857e-06,
"min": 7.61789031787857e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010665046445029999,
"min": 0.00010665046445029999,
"max": 0.0031175821608059994,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10253926428571432,
"min": 0.10253926428571432,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4355497000000004,
"min": 1.3691136000000002,
"max": 2.4001478,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00026367250214285714,
"min": 0.00026367250214285714,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.00369141503,
"min": 0.00369141503,
"max": 0.10393548059999999,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.009322361089289188,
"min": 0.009322361089289188,
"max": 0.47595882415771484,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.1305130571126938,
"min": 0.1305130571126938,
"max": 3.331711769104004,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 339.3804347826087,
"min": 303.2783505154639,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 31223.0,
"min": 15984.0,
"max": 34066.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6157260682919752,
"min": -1.0000000521540642,
"max": 1.695727255910334,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 148.6467982828617,
"min": -32.000001668930054,
"max": 167.87699833512306,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6157260682919752,
"min": -1.0000000521540642,
"max": 1.695727255910334,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 148.6467982828617,
"min": -32.000001668930054,
"max": 167.87699833512306,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.0328367138800272,
"min": 0.030092580188089284,
"max": 9.556122034788132,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.0209776769625023,
"min": 2.913445245809271,
"max": 152.8979525566101,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1680008912",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1680011108"
},
"total": 2195.6978196200002,
"count": 1,
"self": 0.6909038849998979,
"children": {
"run_training.setup": {
"total": 0.16362891200014928,
"count": 1,
"self": 0.16362891200014928
},
"TrainerController.start_learning": {
"total": 2194.843286823,
"count": 1,
"self": 1.3975905629840781,
"children": {
"TrainerController._reset_env": {
"total": 7.085317575999852,
"count": 1,
"self": 7.085317575999852
},
"TrainerController.advance": {
"total": 2186.264137563017,
"count": 63882,
"self": 1.3814049941142912,
"children": {
"env_step": {
"total": 1560.813430597882,
"count": 63882,
"self": 1453.565284748015,
"children": {
"SubprocessEnvManager._take_step": {
"total": 106.399087346741,
"count": 63882,
"self": 4.600275104764478,
"children": {
"TorchPolicy.evaluate": {
"total": 101.79881224197652,
"count": 62555,
"self": 101.79881224197652
}
}
},
"workers": {
"total": 0.8490585031258888,
"count": 63882,
"self": 0.0,
"children": {
"worker_root": {
"total": 2190.036245876132,
"count": 63882,
"is_parallel": true,
"self": 851.927113911262,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0022515549999297946,
"count": 1,
"is_parallel": true,
"self": 0.0006295250000221131,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016220299999076815,
"count": 8,
"is_parallel": true,
"self": 0.0016220299999076815
}
}
},
"UnityEnvironment.step": {
"total": 0.048131857000043965,
"count": 1,
"is_parallel": true,
"self": 0.0005317830004969437,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00043582399985098164,
"count": 1,
"is_parallel": true,
"self": 0.00043582399985098164
},
"communicator.exchange": {
"total": 0.04541343799974129,
"count": 1,
"is_parallel": true,
"self": 0.04541343799974129
},
"steps_from_proto": {
"total": 0.001750811999954749,
"count": 1,
"is_parallel": true,
"self": 0.0003604320008889772,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013903799990657717,
"count": 8,
"is_parallel": true,
"self": 0.0013903799990657717
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1338.10913196487,
"count": 63881,
"is_parallel": true,
"self": 30.72603681404462,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 22.702562970860527,
"count": 63881,
"is_parallel": true,
"self": 22.702562970860527
},
"communicator.exchange": {
"total": 1193.9199669830423,
"count": 63881,
"is_parallel": true,
"self": 1193.9199669830423
},
"steps_from_proto": {
"total": 90.76056519692247,
"count": 63881,
"is_parallel": true,
"self": 19.194448972039027,
"children": {
"_process_rank_one_or_two_observation": {
"total": 71.56611622488344,
"count": 511048,
"is_parallel": true,
"self": 71.56611622488344
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 624.0693019710206,
"count": 63882,
"self": 2.4629538388803667,
"children": {
"process_trajectory": {
"total": 117.5448049291349,
"count": 63882,
"self": 117.34824453913416,
"children": {
"RLTrainer._checkpoint": {
"total": 0.19656039000074088,
"count": 2,
"self": 0.19656039000074088
}
}
},
"_update_policy": {
"total": 504.0615432030054,
"count": 437,
"self": 322.00474077307535,
"children": {
"TorchPPOOptimizer.update": {
"total": 182.05680242993003,
"count": 22800,
"self": 182.05680242993003
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.420000424142927e-07,
"count": 1,
"self": 9.420000424142927e-07
},
"TrainerController._save_models": {
"total": 0.09624017899932369,
"count": 1,
"self": 0.0014051909993213485,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09483498800000234,
"count": 1,
"self": 0.09483498800000234
}
}
}
}
}
}
}