{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.5140262246131897,
"min": 0.5140262246131897,
"max": 1.4391610622406006,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 15585.2744140625,
"min": 15585.2744140625,
"max": 43658.390625,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989899.0,
"min": 29975.0,
"max": 989899.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989899.0,
"min": 29975.0,
"max": 989899.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.40128517150878906,
"min": -0.09970172494649887,
"max": 0.42190641164779663,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 106.74185180664062,
"min": -24.127817153930664,
"max": 114.33663940429688,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.07886388152837753,
"min": -0.020522261038422585,
"max": 0.3121149241924286,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 20.977792739868164,
"min": -5.335787773132324,
"max": 75.21969604492188,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06832461333501019,
"min": 0.06401531374389638,
"max": 0.07179197058836248,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9565445866901428,
"min": 0.565873657516806,
"max": 1.0406451638700673,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.018559521576944026,
"min": 0.0007753290539792149,
"max": 0.018559521576944026,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.2598333020772164,
"min": 0.010079277701729793,
"max": 0.2598333020772164,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.534168917214288e-06,
"min": 7.534168917214288e-06,
"max": 0.000294765526744825,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010547836484100003,
"min": 0.00010547836484100003,
"max": 0.0035086580304474,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10251135714285713,
"min": 0.10251135714285713,
"max": 0.198255175,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4351589999999999,
"min": 1.4351589999999999,
"max": 2.5695526000000006,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00026088457857142863,
"min": 0.00026088457857142863,
"max": 0.009825691982499999,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003652384100000001,
"min": 0.003652384100000001,
"max": 0.11697830474,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.008880306966602802,
"min": 0.008880306966602802,
"max": 0.45677196979522705,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.12432429939508438,
"min": 0.12432429939508438,
"max": 3.6541757583618164,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 422.8493150684931,
"min": 404.93150684931504,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30868.0,
"min": 17541.0,
"max": 32625.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.3578821723795917,
"min": -0.999858116430621,
"max": 1.484681057567532,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 99.1253985837102,
"min": -30.99560160934925,
"max": 109.86639825999737,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.3578821723795917,
"min": -0.999858116430621,
"max": 1.484681057567532,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 99.1253985837102,
"min": -30.99560160934925,
"max": 109.86639825999737,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.03866022539505263,
"min": 0.03866022539505263,
"max": 9.29746906997429,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.822196453838842,
"min": 2.666131267826131,
"max": 167.35444325953722,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1713524202",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1713526421"
},
"total": 2219.277005191,
"count": 1,
"self": 0.5868690130000687,
"children": {
"run_training.setup": {
"total": 0.05304830399995808,
"count": 1,
"self": 0.05304830399995808
},
"TrainerController.start_learning": {
"total": 2218.6370878740004,
"count": 1,
"self": 1.9633150509744155,
"children": {
"TrainerController._reset_env": {
"total": 2.8526793159999784,
"count": 1,
"self": 2.8526793159999784
},
"TrainerController.advance": {
"total": 2213.762012238026,
"count": 63547,
"self": 1.8097842970810234,
"children": {
"env_step": {
"total": 1468.5785473689552,
"count": 63547,
"self": 1339.792102782948,
"children": {
"SubprocessEnvManager._take_step": {
"total": 127.56214935500276,
"count": 63547,
"self": 4.695721017019139,
"children": {
"TorchPolicy.evaluate": {
"total": 122.86642833798362,
"count": 62542,
"self": 122.86642833798362
}
}
},
"workers": {
"total": 1.2242952310044188,
"count": 63547,
"self": 0.0,
"children": {
"worker_root": {
"total": 2214.347077796005,
"count": 63547,
"is_parallel": true,
"self": 1014.8150732889999,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.007910960000003797,
"count": 1,
"is_parallel": true,
"self": 0.004439000000047599,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.003471959999956198,
"count": 8,
"is_parallel": true,
"self": 0.003471959999956198
}
}
},
"UnityEnvironment.step": {
"total": 0.054862544000002345,
"count": 1,
"is_parallel": true,
"self": 0.0006854300000327385,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004128900000068825,
"count": 1,
"is_parallel": true,
"self": 0.0004128900000068825
},
"communicator.exchange": {
"total": 0.05200017399999979,
"count": 1,
"is_parallel": true,
"self": 0.05200017399999979
},
"steps_from_proto": {
"total": 0.0017640499999629355,
"count": 1,
"is_parallel": true,
"self": 0.0003627499999652173,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014012999999977183,
"count": 8,
"is_parallel": true,
"self": 0.0014012999999977183
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1199.5320045070052,
"count": 63546,
"is_parallel": true,
"self": 38.25169981208319,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 19.69471548899321,
"count": 63546,
"is_parallel": true,
"self": 19.69471548899321
},
"communicator.exchange": {
"total": 1046.9141536519196,
"count": 63546,
"is_parallel": true,
"self": 1046.9141536519196
},
"steps_from_proto": {
"total": 94.67143555400924,
"count": 63546,
"is_parallel": true,
"self": 20.852647415105707,
"children": {
"_process_rank_one_or_two_observation": {
"total": 73.81878813890353,
"count": 508368,
"is_parallel": true,
"self": 73.81878813890353
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 743.3736805719893,
"count": 63547,
"self": 4.151871275003259,
"children": {
"process_trajectory": {
"total": 124.56097160698329,
"count": 63547,
"self": 124.37023336798308,
"children": {
"RLTrainer._checkpoint": {
"total": 0.19073823900021125,
"count": 2,
"self": 0.19073823900021125
}
}
},
"_update_policy": {
"total": 614.6608376900028,
"count": 455,
"self": 244.79770379598511,
"children": {
"TorchPPOOptimizer.update": {
"total": 369.8631338940177,
"count": 22815,
"self": 369.8631338940177
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.000000318337698e-07,
"count": 1,
"self": 9.000000318337698e-07
},
"TrainerController._save_models": {
"total": 0.059080369000184874,
"count": 1,
"self": 0.00183022000010169,
"children": {
"RLTrainer._checkpoint": {
"total": 0.057250149000083184,
"count": 1,
"self": 0.057250149000083184
}
}
}
}
}
}
}