ppo-PyramidsRND/run_logs/timers.json
{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.51961350440979,
"min": 0.51961350440979,
"max": 1.447527527809143,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 15496.953125,
"min": 15496.953125,
"max": 43912.1953125,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989933.0,
"min": 29952.0,
"max": 989933.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989933.0,
"min": 29952.0,
"max": 989933.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.5899342894554138,
"min": -0.09063611924648285,
"max": 0.5940611362457275,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 164.00172424316406,
"min": -21.752668380737305,
"max": 167.52523803710938,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.016343796625733376,
"min": -0.022797556594014168,
"max": 0.4818542003631592,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -4.543575286865234,
"min": -5.836174488067627,
"max": 114.19944763183594,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06795133343326772,
"min": 0.06438576406729962,
"max": 0.07458411097317806,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9513186680657479,
"min": 0.5060134941187484,
"max": 1.0623444213415496,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.015654930453387176,
"min": 0.00040046871557183883,
"max": 0.01655174326993338,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.21916902634742047,
"min": 0.0048048470815768365,
"max": 0.2317244057790673,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.721861711792858e-06,
"min": 7.721861711792858e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0001081060639651,
"min": 0.0001081060639651,
"max": 0.0032561651146116996,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10257392142857144,
"min": 0.10257392142857144,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4360349000000001,
"min": 1.3691136000000002,
"max": 2.4441835,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002671347507142858,
"min": 0.0002671347507142858,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003739886510000001,
"min": 0.003739886510000001,
"max": 0.10856029116999999,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.00896767433732748,
"min": 0.00896767433732748,
"max": 0.5370119214057922,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.12554743885993958,
"min": 0.12554743885993958,
"max": 3.7590832710266113,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 323.02247191011236,
"min": 323.02247191011236,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 28749.0,
"min": 15984.0,
"max": 32180.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6544943648610222,
"min": -1.0000000521540642,
"max": 1.6544943648610222,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 147.24999847263098,
"min": -32.000001668930054,
"max": 147.24999847263098,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6544943648610222,
"min": -1.0000000521540642,
"max": 1.6544943648610222,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 147.24999847263098,
"min": -32.000001668930054,
"max": 147.24999847263098,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.029821048834716923,
"min": 0.029821048834716923,
"max": 12.002160223200917,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.654073346289806,
"min": 2.654073346289806,
"max": 192.03456357121468,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1675505460",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1675507465"
},
"total": 2004.1656518869997,
"count": 1,
"self": 0.48444904400002997,
"children": {
"run_training.setup": {
"total": 0.09673215099996924,
"count": 1,
"self": 0.09673215099996924
},
"TrainerController.start_learning": {
"total": 2003.5844706919997,
"count": 1,
"self": 1.1246181880107997,
"children": {
"TrainerController._reset_env": {
"total": 6.256027156000073,
"count": 1,
"self": 6.256027156000073
},
"TrainerController.advance": {
"total": 1996.121717973989,
"count": 63722,
"self": 1.137158281033635,
"children": {
"env_step": {
"total": 1349.7405253799861,
"count": 63722,
"self": 1250.5141902210244,
"children": {
"SubprocessEnvManager._take_step": {
"total": 98.53393322399484,
"count": 63722,
"self": 4.001634289961885,
"children": {
"TorchPolicy.evaluate": {
"total": 94.53229893403295,
"count": 62549,
"self": 31.83752523906992,
"children": {
"TorchPolicy.sample_actions": {
"total": 62.69477369496303,
"count": 62549,
"self": 62.69477369496303
}
}
}
}
},
"workers": {
"total": 0.6924019349669379,
"count": 63722,
"self": 0.0,
"children": {
"worker_root": {
"total": 1999.8922105639888,
"count": 63722,
"is_parallel": true,
"self": 839.975341866015,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.001991319999888219,
"count": 1,
"is_parallel": true,
"self": 0.0007051169998248952,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001286203000063324,
"count": 8,
"is_parallel": true,
"self": 0.001286203000063324
}
}
},
"UnityEnvironment.step": {
"total": 0.04178942300018207,
"count": 1,
"is_parallel": true,
"self": 0.00044849500068266934,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0003909079998720699,
"count": 1,
"is_parallel": true,
"self": 0.0003909079998720699
},
"communicator.exchange": {
"total": 0.03940286899978673,
"count": 1,
"is_parallel": true,
"self": 0.03940286899978673
},
"steps_from_proto": {
"total": 0.0015471509998405963,
"count": 1,
"is_parallel": true,
"self": 0.0004064049999215058,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0011407459999190905,
"count": 8,
"is_parallel": true,
"self": 0.0011407459999190905
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1159.9168686979738,
"count": 63721,
"is_parallel": true,
"self": 27.040604095954222,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 21.097072546950585,
"count": 63721,
"is_parallel": true,
"self": 21.097072546950585
},
"communicator.exchange": {
"total": 1016.5242152310136,
"count": 63721,
"is_parallel": true,
"self": 1016.5242152310136
},
"steps_from_proto": {
"total": 95.25497682405535,
"count": 63721,
"is_parallel": true,
"self": 20.267969524131786,
"children": {
"_process_rank_one_or_two_observation": {
"total": 74.98700729992356,
"count": 509768,
"is_parallel": true,
"self": 74.98700729992356
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 645.2440343129692,
"count": 63722,
"self": 2.1269323009769323,
"children": {
"process_trajectory": {
"total": 139.3602263449934,
"count": 63722,
"self": 139.18021661199418,
"children": {
"RLTrainer._checkpoint": {
"total": 0.18000973299922407,
"count": 2,
"self": 0.18000973299922407
}
}
},
"_update_policy": {
"total": 503.7568756669989,
"count": 441,
"self": 189.14328699202838,
"children": {
"TorchPPOOptimizer.update": {
"total": 314.6135886749705,
"count": 22833,
"self": 314.6135886749705
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.43000031838892e-07,
"count": 1,
"self": 9.43000031838892e-07
},
"TrainerController._save_models": {
"total": 0.08210643099982917,
"count": 1,
"self": 0.0013195709998399252,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08078685999998925,
"count": 1,
"self": 0.08078685999998925
}
}
}
}
}
}
}
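
A minimal sketch for inspecting a timers.json like the one above, assuming the path run_logs/timers.json from the repository layout; this is an illustration, not part of the ML-Agents tooling. The "gauges" block records a running value plus min/max/count for each training statistic, and the rest of the file is a hierarchical wall-clock timer tree in which every node reports total seconds, self time, call count, and nested children.

    import json

    # Path assumed from the repo layout shown above.
    with open("run_logs/timers.json") as f:
        timers = json.load(f)

    # Summarize each training gauge (value / min / max / sample count).
    for name, gauge in timers["gauges"].items():
        print(f"{name}: value={gauge['value']:.4g} "
              f"(min={gauge['min']:.4g}, max={gauge['max']:.4g}, n={gauge['count']})")

    # Walk the timer tree; the top-level object is itself the root timer node,
    # carrying "total", "self", "count", and "children" alongside the gauges.
    def walk(node, label="root", depth=0):
        print("  " * depth + f"{label}: {node.get('total', 0.0):.2f}s total, "
              f"{node.get('self', 0.0):.2f}s self, count={node.get('count', 0)}")
        for child_label, child in node.get("children", {}).items():
            walk(child, child_label, depth + 1)

    walk(timers)

Run against this file, the tree walk would show that nearly all of the ~2004 s of wall-clock time sits under TrainerController.advance, split between env_step (environment stepping and action sampling) and trainer_advance (trajectory processing and PPO updates).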