{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.5887473225593567,
"min": 0.5887473225593567,
"max": 1.4489712715148926,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 17784.87890625,
"min": 17784.87890625,
"max": 43955.9921875,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989938.0,
"min": 29952.0,
"max": 989938.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989938.0,
"min": 29952.0,
"max": 989938.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.16798213124275208,
"min": -0.0986766442656517,
"max": 0.25425076484680176,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 42.66746139526367,
"min": -23.68239402770996,
"max": 65.34244537353516,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.027209730818867683,
"min": 0.0034303925931453705,
"max": 0.4191873371601105,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 6.911271572113037,
"min": 0.8713197112083435,
"max": 99.34739685058594,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06926785528776237,
"min": 0.06681098775811677,
"max": 0.07294155296045493,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9697499740286731,
"min": 0.5105908707231845,
"max": 1.0754790769230265,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.012299207133331772,
"min": 0.0005944496037608161,
"max": 0.012804289803765216,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.17218889986664482,
"min": 0.006538945641368977,
"max": 0.18636405673007378,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.449290374078574e-06,
"min": 7.449290374078574e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010429006523710004,
"min": 0.00010429006523710004,
"max": 0.003633102488965899,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10248306428571431,
"min": 0.10248306428571431,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4347629000000004,
"min": 1.3886848,
"max": 2.6110341000000004,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002580581221428572,
"min": 0.0002580581221428572,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003612813710000001,
"min": 0.003612813710000001,
"max": 0.12112230659,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.008309068158268929,
"min": 0.008309068158268929,
"max": 0.7043861746788025,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.1163269504904747,
"min": 0.1163269504904747,
"max": 4.930703163146973,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 650.7708333333334,
"min": 545.6296296296297,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 31237.0,
"min": 15984.0,
"max": 33036.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 0.6823749650890628,
"min": -1.0000000521540642,
"max": 1.046822194699888,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 32.75399832427502,
"min": -31.998801663517952,
"max": 56.528398513793945,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 0.6823749650890628,
"min": -1.0000000521540642,
"max": 1.046822194699888,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 32.75399832427502,
"min": -31.998801663517952,
"max": 56.528398513793945,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.054737693156008994,
"min": 0.05135660698147991,
"max": 14.604621484875679,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.627409271488432,
"min": 2.608207010096521,
"max": 233.67394375801086,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1675362459",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1675364397"
},
"total": 1938.2921379189997,
"count": 1,
"self": 0.4403622379998069,
"children": {
"run_training.setup": {
"total": 0.09516950100010035,
"count": 1,
"self": 0.09516950100010035
},
"TrainerController.start_learning": {
"total": 1937.7566061799998,
"count": 1,
"self": 1.2149356870136216,
"children": {
"TrainerController._reset_env": {
"total": 5.892959359000088,
"count": 1,
"self": 5.892959359000088
},
"TrainerController.advance": {
"total": 1930.5673518879862,
"count": 63505,
"self": 1.1729644860743065,
"children": {
"env_step": {
"total": 1295.0771217759645,
"count": 63505,
"self": 1194.9007421118647,
"children": {
"SubprocessEnvManager._take_step": {
"total": 99.43894389006141,
"count": 63505,
"self": 4.040880972070454,
"children": {
"TorchPolicy.evaluate": {
"total": 95.39806291799096,
"count": 62551,
"self": 31.948155222000423,
"children": {
"TorchPolicy.sample_actions": {
"total": 63.44990769599053,
"count": 62551,
"self": 63.44990769599053
}
}
}
}
},
"workers": {
"total": 0.7374357740384312,
"count": 63505,
"self": 0.0,
"children": {
"worker_root": {
"total": 1934.0037313000432,
"count": 63505,
"is_parallel": true,
"self": 832.8391381410568,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0016704760000720853,
"count": 1,
"is_parallel": true,
"self": 0.000585913999884724,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0010845620001873613,
"count": 8,
"is_parallel": true,
"self": 0.0010845620001873613
}
}
},
"UnityEnvironment.step": {
"total": 0.05270115700000133,
"count": 1,
"is_parallel": true,
"self": 0.00045832100022380473,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00043475799998304865,
"count": 1,
"is_parallel": true,
"self": 0.00043475799998304865
},
"communicator.exchange": {
"total": 0.05027758099981838,
"count": 1,
"is_parallel": true,
"self": 0.05027758099981838
},
"steps_from_proto": {
"total": 0.0015304969999760942,
"count": 1,
"is_parallel": true,
"self": 0.0004139210000175808,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0011165759999585134,
"count": 8,
"is_parallel": true,
"self": 0.0011165759999585134
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1101.1645931589865,
"count": 63504,
"is_parallel": true,
"self": 26.675509281101995,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 20.881992911926318,
"count": 63504,
"is_parallel": true,
"self": 20.881992911926318
},
"communicator.exchange": {
"total": 960.549131236919,
"count": 63504,
"is_parallel": true,
"self": 960.549131236919
},
"steps_from_proto": {
"total": 93.05795972903911,
"count": 63504,
"is_parallel": true,
"self": 20.484090245141715,
"children": {
"_process_rank_one_or_two_observation": {
"total": 72.5738694838974,
"count": 508032,
"is_parallel": true,
"self": 72.5738694838974
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 634.3172656259474,
"count": 63505,
"self": 2.2933510589264188,
"children": {
"process_trajectory": {
"total": 137.1191526970208,
"count": 63505,
"self": 136.9351360140206,
"children": {
"RLTrainer._checkpoint": {
"total": 0.184016683000209,
"count": 2,
"self": 0.184016683000209
}
}
},
"_update_policy": {
"total": 494.90476187000013,
"count": 453,
"self": 184.21105820402113,
"children": {
"TorchPPOOptimizer.update": {
"total": 310.693703665979,
"count": 22833,
"self": 310.693703665979
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.850001904647797e-07,
"count": 1,
"self": 8.850001904647797e-07
},
"TrainerController._save_models": {
"total": 0.08135836099972948,
"count": 1,
"self": 0.0017721299996082962,
"children": {
"RLTrainer._checkpoint": {
"total": 0.07958623100012119,
"count": 1,
"self": 0.07958623100012119
}
}
}
}
}
}
}