{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.47815948724746704,
"min": 0.47815948724746704,
"max": 1.4071797132492065,
"count": 36
},
"Pyramids.Policy.Entropy.sum": {
"value": 14291.23046875,
"min": 14291.23046875,
"max": 42688.203125,
"count": 36
},
"Pyramids.Step.mean": {
"value": 1079945.0,
"min": 29952.0,
"max": 1079945.0,
"count": 36
},
"Pyramids.Step.sum": {
"value": 1079945.0,
"min": 29952.0,
"max": 1079945.0,
"count": 36
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.6419244408607483,
"min": -0.11252445727586746,
"max": 0.6419244408607483,
"count": 36
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 179.7388458251953,
"min": -27.230918884277344,
"max": 180.59671020507812,
"count": 36
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.010029821656644344,
"min": -0.010826490819454193,
"max": 0.49480146169662476,
"count": 36
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -2.808350086212158,
"min": -2.8798465728759766,
"max": 117.2679443359375,
"count": 36
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06839069515644085,
"min": 0.06356442188067982,
"max": 0.07343371467483369,
"count": 36
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9574697321901718,
"min": 0.500739667878082,
"max": 1.090445691855469,
"count": 36
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.014643920549826274,
"min": 0.0005605292232609406,
"max": 0.014891801925495244,
"count": 36
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.20501488769756784,
"min": 0.007847409125653168,
"max": 0.2166041955545855,
"count": 36
},
"Pyramids.Policy.LearningRate.mean": {
"value": 0.0001935294854901833,
"min": 0.0001935294854901833,
"max": 0.00029838354339596195,
"count": 36
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0027094127968625664,
"min": 0.0020886848037717336,
"max": 0.004010992663002467,
"count": 36
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.16450981666666667,
"min": 0.16450981666666667,
"max": 0.19946118095238097,
"count": 36
},
"Pyramids.Policy.Epsilon.sum": {
"value": 2.303137433333333,
"min": 1.3962282666666668,
"max": 2.737553433333334,
"count": 36
},
"Pyramids.Policy.Beta.mean": {
"value": 0.006454530685,
"min": 0.006454530685,
"max": 0.009946171977142856,
"count": 36
},
"Pyramids.Policy.Beta.sum": {
"value": 0.09036342959,
"min": 0.06962320384,
"max": 0.13370605358,
"count": 36
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.009860126301646233,
"min": 0.008886665105819702,
"max": 0.4953594505786896,
"count": 36
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.13804176449775696,
"min": 0.12441331148147583,
"max": 3.4675161838531494,
"count": 36
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 312.3,
"min": 292.43564356435644,
"max": 999.0,
"count": 36
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 28107.0,
"min": 15984.0,
"max": 32703.0,
"count": 36
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6210066413713826,
"min": -1.0000000521540642,
"max": 1.6210066413713826,
"count": 36
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 145.89059772342443,
"min": -29.163801692426205,
"max": 162.46159832179546,
"count": 36
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6210066413713826,
"min": -1.0000000521540642,
"max": 1.6210066413713826,
"count": 36
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 145.89059772342443,
"min": -29.163801692426205,
"max": 162.46159832179546,
"count": 36
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.03258738735959115,
"min": 0.030348350527341302,
"max": 10.07096153870225,
"count": 36
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.9328648623632034,
"min": 2.7411322458283394,
"max": 161.135384619236,
"count": 36
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 36
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 36
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1711385354",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1711387896"
},
"total": 2542.346151972,
"count": 1,
"self": 0.5309858070004339,
"children": {
"run_training.setup": {
"total": 0.051982106999730604,
"count": 1,
"self": 0.051982106999730604
},
"TrainerController.start_learning": {
"total": 2541.7631840579998,
"count": 1,
"self": 1.7213359299171316,
"children": {
"TrainerController._reset_env": {
"total": 2.2638276279999445,
"count": 1,
"self": 2.2638276279999445
},
"TrainerController.advance": {
"total": 2537.7780191400834,
"count": 70343,
"self": 1.8593174349753099,
"children": {
"env_step": {
"total": 1832.1659136910534,
"count": 70343,
"self": 1671.802882216141,
"children": {
"SubprocessEnvManager._take_step": {
"total": 159.29616255194878,
"count": 70343,
"self": 5.766553772971747,
"children": {
"TorchPolicy.evaluate": {
"total": 153.52960877897704,
"count": 68888,
"self": 153.52960877897704
}
}
},
"workers": {
"total": 1.0668689229637494,
"count": 70342,
"self": 0.0,
"children": {
"worker_root": {
"total": 2535.909101165019,
"count": 70342,
"is_parallel": true,
"self": 1010.0970267519479,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002364308999858622,
"count": 1,
"is_parallel": true,
"self": 0.0007468940002581803,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016174149996004417,
"count": 8,
"is_parallel": true,
"self": 0.0016174149996004417
}
}
},
"UnityEnvironment.step": {
"total": 0.08976130900009593,
"count": 1,
"is_parallel": true,
"self": 0.0006464590001087345,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004880199999206525,
"count": 1,
"is_parallel": true,
"self": 0.0004880199999206525
},
"communicator.exchange": {
"total": 0.08697422899967933,
"count": 1,
"is_parallel": true,
"self": 0.08697422899967933
},
"steps_from_proto": {
"total": 0.0016526010003872216,
"count": 1,
"is_parallel": true,
"self": 0.00032361900002797483,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013289820003592467,
"count": 8,
"is_parallel": true,
"self": 0.0013289820003592467
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1525.812074413071,
"count": 70341,
"is_parallel": true,
"self": 40.40756634888703,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 28.177959748069952,
"count": 70341,
"is_parallel": true,
"self": 28.177959748069952
},
"communicator.exchange": {
"total": 1336.9210302860288,
"count": 70341,
"is_parallel": true,
"self": 1336.9210302860288
},
"steps_from_proto": {
"total": 120.3055180300853,
"count": 70341,
"is_parallel": true,
"self": 25.02236169707021,
"children": {
"_process_rank_one_or_two_observation": {
"total": 95.28315633301509,
"count": 562728,
"is_parallel": true,
"self": 95.28315633301509
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 703.7527880140547,
"count": 70342,
"self": 3.2670534280186985,
"children": {
"process_trajectory": {
"total": 148.5039861730279,
"count": 70342,
"self": 148.2440870410278,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2598991320001005,
"count": 2,
"self": 0.2598991320001005
}
}
},
"_update_policy": {
"total": 551.9817484130081,
"count": 500,
"self": 320.7032374350101,
"children": {
"TorchPPOOptimizer.update": {
"total": 231.27851097799794,
"count": 25053,
"self": 231.27851097799794
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.3599992598756216e-06,
"count": 1,
"self": 1.3599992598756216e-06
}
}
}
}
}