{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.15419963002204895,
"min": 0.1439506560564041,
"max": 1.4417672157287598,
"count": 100
},
"Pyramids.Policy.Entropy.sum": {
"value": 4613.65283203125,
"min": 4346.158203125,
"max": 43737.44921875,
"count": 100
},
"Pyramids.Step.mean": {
"value": 2999915.0,
"min": 29981.0,
"max": 2999915.0,
"count": 100
},
"Pyramids.Step.sum": {
"value": 2999915.0,
"min": 29981.0,
"max": 2999915.0,
"count": 100
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.7366708517074585,
"min": -0.09642678499221802,
"max": 0.8856813311576843,
"count": 100
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 218.05458068847656,
"min": -23.238855361938477,
"max": 273.675537109375,
"count": 100
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.015807395800948143,
"min": -0.010172320529818535,
"max": 0.30925413966178894,
"count": 100
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 4.678989410400391,
"min": -3.0008344650268555,
"max": 74.22099304199219,
"count": 100
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06624905925751332,
"min": 0.06455514475306874,
"max": 0.07447222975038346,
"count": 100
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9274868296051865,
"min": 0.5029721026285083,
"max": 1.0506878335875753,
"count": 100
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.015946684902744545,
"min": 9.625917285858086e-05,
"max": 0.017056086639751725,
"count": 100
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.22325358863842362,
"min": 0.0012513692471615512,
"max": 0.2535860696904516,
"count": 100
},
"Pyramids.Policy.LearningRate.mean": {
"value": 1.569978048135712e-06,
"min": 1.569978048135712e-06,
"max": 0.0002984123862434905,
"count": 100
},
"Pyramids.Policy.LearningRate.sum": {
"value": 2.197969267389997e-05,
"min": 2.197969267389997e-05,
"max": 0.0037590180469940326,
"count": 100
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10052329285714286,
"min": 0.10052329285714286,
"max": 0.19947079523809524,
"count": 100
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4073261,
"min": 1.3962955666666668,
"max": 2.7075139666666663,
"count": 100
},
"Pyramids.Policy.Beta.mean": {
"value": 6.227695642857136e-05,
"min": 6.227695642857136e-05,
"max": 0.009947132444285713,
"count": 100
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0008718773899999992,
"min": 0.0008718773899999992,
"max": 0.12531529607000003,
"count": 100
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.005896520335227251,
"min": 0.005484605673700571,
"max": 0.5323460698127747,
"count": 100
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.08255128562450409,
"min": 0.07884299755096436,
"max": 3.7264225482940674,
"count": 100
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 249.256,
"min": 208.79591836734693,
"max": 999.0,
"count": 100
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 31157.0,
"min": 16652.0,
"max": 33642.0,
"count": 100
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.7012999837076472,
"min": -1.0000000521540642,
"max": 1.790217672278281,
"count": 100
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 210.96119797974825,
"min": -32.000001668930054,
"max": 263.1619978249073,
"count": 100
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.7012999837076472,
"min": -1.0000000521540642,
"max": 1.790217672278281,
"count": 100
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 210.96119797974825,
"min": -32.000001668930054,
"max": 263.1619978249073,
"count": 100
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.015280251960575697,
"min": 0.012767925405061921,
"max": 10.269266018534408,
"count": 100
},
"Pyramids.Policy.RndReward.sum": {
"value": 1.8947512431113864,
"min": 1.6837215152481804,
"max": 174.57752231508493,
"count": 100
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 100
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 100
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1701961697",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics --force",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1701969641"
},
"total": 7943.532301667,
"count": 1,
"self": 0.48971800099934626,
"children": {
"run_training.setup": {
"total": 0.062130511000077604,
"count": 1,
"self": 0.062130511000077604
},
"TrainerController.start_learning": {
"total": 7942.980453155001,
"count": 1,
"self": 4.816256082775908,
"children": {
"TrainerController._reset_env": {
"total": 3.683077721000018,
"count": 1,
"self": 3.683077721000018
},
"TrainerController.advance": {
"total": 7934.400853264224,
"count": 194913,
"self": 4.914871119586678,
"children": {
"env_step": {
"total": 5924.135419012621,
"count": 194913,
"self": 5491.758064252891,
"children": {
"SubprocessEnvManager._take_step": {
"total": 429.4342939949072,
"count": 194913,
"self": 15.616328825285791,
"children": {
"TorchPolicy.evaluate": {
"total": 413.8179651696214,
"count": 187549,
"self": 413.8179651696214
}
}
},
"workers": {
"total": 2.9430607648228033,
"count": 194913,
"self": 0.0,
"children": {
"worker_root": {
"total": 7926.968700177998,
"count": 194913,
"is_parallel": true,
"self": 2841.870881891001,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005556724999905782,
"count": 1,
"is_parallel": true,
"self": 0.004163067999911618,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001393656999994164,
"count": 8,
"is_parallel": true,
"self": 0.001393656999994164
}
}
},
"UnityEnvironment.step": {
"total": 0.05895132599994213,
"count": 1,
"is_parallel": true,
"self": 0.0006423359999416789,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0006649890000289815,
"count": 1,
"is_parallel": true,
"self": 0.0006649890000289815
},
"communicator.exchange": {
"total": 0.055636560000039026,
"count": 1,
"is_parallel": true,
"self": 0.055636560000039026
},
"steps_from_proto": {
"total": 0.0020074409999324416,
"count": 1,
"is_parallel": true,
"self": 0.00042908799980523327,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015783530001272084,
"count": 8,
"is_parallel": true,
"self": 0.0015783530001272084
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 5085.097818286997,
"count": 194912,
"is_parallel": true,
"self": 110.48581408493192,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 78.84881484473817,
"count": 194912,
"is_parallel": true,
"self": 78.84881484473817
},
"communicator.exchange": {
"total": 4569.461790351076,
"count": 194912,
"is_parallel": true,
"self": 4569.461790351076
},
"steps_from_proto": {
"total": 326.30139900625,
"count": 194912,
"is_parallel": true,
"self": 67.58338236588793,
"children": {
"_process_rank_one_or_two_observation": {
"total": 258.71801664036207,
"count": 1559296,
"is_parallel": true,
"self": 258.71801664036207
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 2005.3505631320158,
"count": 194913,
"self": 9.425250947861969,
"children": {
"process_trajectory": {
"total": 421.5178631541594,
"count": 194913,
"self": 420.97843281115865,
"children": {
"RLTrainer._checkpoint": {
"total": 0.5394303430007312,
"count": 6,
"self": 0.5394303430007312
}
}
},
"_update_policy": {
"total": 1574.4074490299945,
"count": 1395,
"self": 951.871964159049,
"children": {
"TorchPPOOptimizer.update": {
"total": 622.5354848709455,
"count": 68373,
"self": 622.5354848709455
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.620008884463459e-07,
"count": 1,
"self": 8.620008884463459e-07
},
"TrainerController._save_models": {
"total": 0.080265225000403,
"count": 1,
"self": 0.0014251680004235823,
"children": {
"RLTrainer._checkpoint": {
"total": 0.07884005699997942,
"count": 1,
"self": 0.07884005699997942
}
}
}
}
}
}
}