ppo-Pyramids/run_logs/timers.json
{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.39078158140182495,
"min": 0.39078158140182495,
"max": 0.959633469581604,
"count": 26
},
"Pyramids.Policy.Entropy.sum": {
"value": 11767.21484375,
"min": 1467.0263671875,
"max": 28742.94140625,
"count": 26
},
"Pyramids.Step.mean": {
"value": 989894.0,
"min": 239955.0,
"max": 989894.0,
"count": 26
},
"Pyramids.Step.sum": {
"value": 989894.0,
"min": 239955.0,
"max": 989894.0,
"count": 26
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.42578125,
"min": -0.11405441910028458,
"max": 0.48761728405952454,
"count": 26
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 111.12890625,
"min": -22.48892593383789,
"max": 133.49281311035156,
"count": 26
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.015334302559494972,
"min": -0.0005214456468820572,
"max": 0.06604637205600739,
"count": 26
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 4.00225305557251,
"min": -0.1439189910888672,
"max": 12.465400695800781,
"count": 26
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 26
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 26
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06877448264007582,
"min": 0.06547761583860835,
"max": 0.07437893156935199,
"count": 25
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9628427569610615,
"min": 0.7144435800909108,
"max": 1.0666395878846135,
"count": 25
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.014782466559844673,
"min": 0.0006058385635895857,
"max": 0.01599865179984287,
"count": 25
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.20695453183782542,
"min": 0.007270062763075029,
"max": 0.23746241712069607,
"count": 25
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.2895189987642845e-06,
"min": 7.2895189987642845e-06,
"max": 0.00022304366565211998,
"count": 25
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010205326598269998,
"min": 0.00010205326598269998,
"max": 0.0025510507496499003,
"count": 25
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10242980714285717,
"min": 0.10242980714285717,
"max": 0.17434787999999998,
"count": 25
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4340173000000003,
"min": 1.4340173000000003,
"max": 2.190688,
"count": 25
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00025273773357142854,
"min": 0.00025273773357142854,
"max": 0.007437353212,
"count": 25
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0035383282699999997,
"min": 0.0035383282699999997,
"max": 0.08507997499,
"count": 25
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.011967487633228302,
"min": 0.011967487633228302,
"max": 0.04896247014403343,
"count": 25
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.16754482686519623,
"min": 0.16754482686519623,
"max": 0.4896247088909149,
"count": 25
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 409.95588235294116,
"min": 367.0,
"max": 994.4117647058823,
"count": 25
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 27877.0,
"min": 16905.0,
"max": 32634.0,
"count": 25
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.3546793781878317,
"min": -0.9292000523119262,
"max": 1.5588987494508426,
"count": 25
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 92.11819771677256,
"min": -30.663601726293564,
"max": 126.27079870551825,
"count": 25
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.3546793781878317,
"min": -0.9292000523119262,
"max": 1.5588987494508426,
"count": 25
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 92.11819771677256,
"min": -30.663601726293564,
"max": 126.27079870551825,
"count": 25
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.05098443436851513,
"min": 0.05098443436851513,
"max": 0.5046917924746162,
"count": 25
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.4669415370590286,
"min": 3.4669415370590286,
"max": 13.512158337049186,
"count": 25
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1673437582",
"python_version": "3.8.9 (default, Jan 11 2023, 11:43:45) \n[GCC 8.5.0 20210514 (Red Hat 8.5.0-10)]",
"command_line_arguments": "/home/marco/.virtualenvs/ml38/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids --no-graphics --resume",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.2+cu111",
"numpy_version": "1.23.5",
"end_time_seconds": "1673438813"
},
"total": 1230.21500122,
"count": 1,
"self": 0.5250316530000418,
"children": {
"run_training.setup": {
"total": 0.04993825498968363,
"count": 1,
"self": 0.04993825498968363
},
"TrainerController.start_learning": {
"total": 1229.6400313120103,
"count": 1,
"self": 0.873780082212761,
"children": {
"TrainerController._reset_env": {
"total": 16.60682203900069,
"count": 1,
"self": 16.60682203900069
},
"TrainerController.advance": {
"total": 1211.900818651804,
"count": 48449,
"self": 0.8250228467804845,
"children": {
"env_step": {
"total": 724.8020726982213,
"count": 48449,
"self": 653.9688999232894,
"children": {
"SubprocessEnvManager._take_step": {
"total": 70.30755756580038,
"count": 48449,
"self": 2.880844031518791,
"children": {
"TorchPolicy.evaluate": {
"total": 67.42671353428159,
"count": 47609,
"self": 21.31115324224811,
"children": {
"TorchPolicy.sample_actions": {
"total": 46.11556029203348,
"count": 47609,
"self": 46.11556029203348
}
}
}
}
},
"workers": {
"total": 0.5256152091315016,
"count": 48449,
"self": 0.0,
"children": {
"worker_root": {
"total": 1227.7564379225078,
"count": 48449,
"is_parallel": true,
"self": 641.0297609794361,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0017990200140047818,
"count": 1,
"is_parallel": true,
"self": 0.0006898560677655041,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0011091639462392777,
"count": 8,
"is_parallel": true,
"self": 0.0011091639462392777
}
}
},
"UnityEnvironment.step": {
"total": 0.03202838698052801,
"count": 1,
"is_parallel": true,
"self": 0.0003686369745992124,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0003417220141272992,
"count": 1,
"is_parallel": true,
"self": 0.0003417220141272992
},
"communicator.exchange": {
"total": 0.030225806985981762,
"count": 1,
"is_parallel": true,
"self": 0.030225806985981762
},
"steps_from_proto": {
"total": 0.001092221005819738,
"count": 1,
"is_parallel": true,
"self": 0.00031756097450852394,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.000774660031311214,
"count": 8,
"is_parallel": true,
"self": 0.000774660031311214
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 586.7266769430717,
"count": 48448,
"is_parallel": true,
"self": 14.195466856646817,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 10.359452535078162,
"count": 48448,
"is_parallel": true,
"self": 10.359452535078162
},
"communicator.exchange": {
"total": 519.3050258742296,
"count": 48448,
"is_parallel": true,
"self": 519.3050258742296
},
"steps_from_proto": {
"total": 42.86673167711706,
"count": 48448,
"is_parallel": true,
"self": 10.673247159080347,
"children": {
"_process_rank_one_or_two_observation": {
"total": 32.19348451803671,
"count": 387584,
"is_parallel": true,
"self": 32.19348451803671
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 486.2737231068022,
"count": 48449,
"self": 1.542325942980824,
"children": {
"process_trajectory": {
"total": 103.32331446980243,
"count": 48449,
"self": 102.71449498078437,
"children": {
"RLTrainer._checkpoint": {
"total": 0.6088194890180603,
"count": 2,
"self": 0.6088194890180603
}
}
},
"_update_policy": {
"total": 381.40808269401896,
"count": 344,
"self": 134.51145814190386,
"children": {
"TorchPPOOptimizer.update": {
"total": 246.8966245521151,
"count": 17316,
"self": 246.8966245521151
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.709910955280066e-07,
"count": 1,
"self": 9.709910955280066e-07
},
"TrainerController._save_models": {
"total": 0.25860956800170243,
"count": 1,
"self": 0.014524578000418842,
"children": {
"RLTrainer._checkpoint": {
"total": 0.24408499000128359,
"count": 1,
"self": 0.24408499000128359
}
}
}
}
}
}
}