ppo-PyramidsRND/run_logs/timers.json
{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.3850148916244507,
"min": 0.3736658990383148,
"max": 1.403730034828186,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 11556.607421875,
"min": 11078.4462890625,
"max": 42583.5546875,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989963.0,
"min": 29915.0,
"max": 989963.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989963.0,
"min": 29915.0,
"max": 989963.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.5188890695571899,
"min": -0.12454988062381744,
"max": 0.5331054329872131,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 142.69448852539062,
"min": -29.891971588134766,
"max": 146.60398864746094,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.023866167291998863,
"min": -0.0111561119556427,
"max": 0.43092843890190125,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 6.563196182250977,
"min": -3.0344624519348145,
"max": 102.13004302978516,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07123437206365536,
"min": 0.06470598120081593,
"max": 0.0735213648708951,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.997281208891175,
"min": 0.5696150898927757,
"max": 1.1028204730634266,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.01580614767472096,
"min": 0.001137092577986113,
"max": 0.01677967188264346,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.22128606744609344,
"min": 0.015919296091805583,
"max": 0.23491540635700847,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.42351181124286e-06,
"min": 7.42351181124286e-06,
"max": 0.0002948428892190375,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010392916535740004,
"min": 0.00010392916535740004,
"max": 0.003724224658591799,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10247447142857144,
"min": 0.10247447142857144,
"max": 0.1982809625,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4346426,
"min": 1.4346426,
"max": 2.617613000000001,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002571996957142858,
"min": 0.0002571996957142858,
"max": 0.00982826815375,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0036007957400000018,
"min": 0.0036007957400000018,
"max": 0.12414667918000002,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.014671503566205502,
"min": 0.014671503566205502,
"max": 0.5129152536392212,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.20540104806423187,
"min": 0.20540104806423187,
"max": 4.1033220291137695,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 348.26506024096386,
"min": 348.26506024096386,
"max": 995.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 28906.0,
"min": 17097.0,
"max": 32424.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.5553204632667175,
"min": -0.9333813029807061,
"max": 1.6233633898380326,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 129.09159845113754,
"min": -29.868201695382595,
"max": 133.73779855668545,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.5553204632667175,
"min": -0.9333813029807061,
"max": 1.6233633898380326,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 129.09159845113754,
"min": -29.868201695382595,
"max": 133.73779855668545,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.05288865482638007,
"min": 0.05288865482638007,
"max": 10.20352097021209,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 4.389758350589545,
"min": 4.389758350589545,
"max": 183.6633774638176,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1680954774",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/home/sicong/Data/Projects/HuggingFace_DeepReinforcementLearning/venv_shared/bin/mlagents-learn ./ml-agents/config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids_Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1680956032"
},
"total": 1258.4368059220142,
"count": 1,
"self": 0.21992897504242137,
"children": {
"run_training.setup": {
"total": 0.019899851991795003,
"count": 1,
"self": 0.019899851991795003
},
"TrainerController.start_learning": {
"total": 1258.19697709498,
"count": 1,
"self": 1.0607770374044776,
"children": {
"TrainerController._reset_env": {
"total": 2.846834211028181,
"count": 1,
"self": 2.846834211028181
},
"TrainerController.advance": {
"total": 1254.2096045695362,
"count": 63903,
"self": 0.9535994690959342,
"children": {
"env_step": {
"total": 793.6582737493445,
"count": 63903,
"self": 684.1445936444798,
"children": {
"SubprocessEnvManager._take_step": {
"total": 108.85136416344903,
"count": 63903,
"self": 2.6127780139795505,
"children": {
"TorchPolicy.evaluate": {
"total": 106.23858614946948,
"count": 62560,
"self": 106.23858614946948
}
}
},
"workers": {
"total": 0.6623159414157271,
"count": 63903,
"self": 0.0,
"children": {
"worker_root": {
"total": 1256.6302440160653,
"count": 63903,
"is_parallel": true,
"self": 637.8741833117092,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0011611069785431027,
"count": 1,
"is_parallel": true,
"self": 0.00037755194352939725,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0007835550350137055,
"count": 8,
"is_parallel": true,
"self": 0.0007835550350137055
}
}
},
"UnityEnvironment.step": {
"total": 0.02313803305150941,
"count": 1,
"is_parallel": true,
"self": 0.0002915430814027786,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0002708919928409159,
"count": 1,
"is_parallel": true,
"self": 0.0002708919928409159
},
"communicator.exchange": {
"total": 0.021812483959365636,
"count": 1,
"is_parallel": true,
"self": 0.021812483959365636
},
"steps_from_proto": {
"total": 0.0007631140179000795,
"count": 1,
"is_parallel": true,
"self": 0.00017198000568896532,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005911340122111142,
"count": 8,
"is_parallel": true,
"self": 0.0005911340122111142
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 618.7560607043561,
"count": 63902,
"is_parallel": true,
"self": 20.38584548048675,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 14.031576734676491,
"count": 63902,
"is_parallel": true,
"self": 14.031576734676491
},
"communicator.exchange": {
"total": 535.8076451613451,
"count": 63902,
"is_parallel": true,
"self": 535.8076451613451
},
"steps_from_proto": {
"total": 48.53099332784768,
"count": 63902,
"is_parallel": true,
"self": 9.902067869785242,
"children": {
"_process_rank_one_or_two_observation": {
"total": 38.62892545806244,
"count": 511216,
"is_parallel": true,
"self": 38.62892545806244
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 459.5977313510957,
"count": 63903,
"self": 1.9296662296401337,
"children": {
"process_trajectory": {
"total": 81.51492056960706,
"count": 63903,
"self": 81.37069327553036,
"children": {
"RLTrainer._checkpoint": {
"total": 0.14422729407669976,
"count": 2,
"self": 0.14422729407669976
}
}
},
"_update_policy": {
"total": 376.1531445518485,
"count": 462,
"self": 226.39872767933412,
"children": {
"TorchPPOOptimizer.update": {
"total": 149.7544168725144,
"count": 22758,
"self": 149.7544168725144
}
}
}
}
}
}
},
"trainer_threads": {
"total": 6.100162863731384e-07,
"count": 1,
"self": 6.100162863731384e-07
},
"TrainerController._save_models": {
"total": 0.07976066699484363,
"count": 1,
"self": 0.0008779889903962612,
"children": {
"RLTrainer._checkpoint": {
"total": 0.07888267800444737,
"count": 1,
"self": 0.07888267800444737
}
}
}
}
}
}
}