{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.1852513700723648,
"min": 0.1828487664461136,
"max": 1.5463743209838867,
"count": 100
},
"Pyramids.Policy.Entropy.sum": {
"value": 5557.541015625,
"min": 5459.1328125,
"max": 46910.8125,
"count": 100
},
"Pyramids.Step.mean": {
"value": 2999912.0,
"min": 29932.0,
"max": 2999912.0,
"count": 100
},
"Pyramids.Step.sum": {
"value": 2999912.0,
"min": 29932.0,
"max": 2999912.0,
"count": 100
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.7237173318862915,
"min": -0.13040199875831604,
"max": 0.8135476112365723,
"count": 100
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 211.32545471191406,
"min": -31.296480178833008,
"max": 245.69137573242188,
"count": 100
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.006164701655507088,
"min": -0.024830089882016182,
"max": 0.20712080597877502,
"count": 100
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 1.8000929355621338,
"min": -6.952425003051758,
"max": 49.08763122558594,
"count": 100
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06884813228806648,
"min": 0.06506748594983948,
"max": 0.07557280880960092,
"count": 100
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9638738520329306,
"min": 0.5113399944520066,
"max": 1.1017595254793289,
"count": 100
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.015028054526893236,
"min": 6.47512603286681e-05,
"max": 0.016954349202153805,
"count": 100
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.2103927633765053,
"min": 0.0007770151239440172,
"max": 0.23736088883015327,
"count": 100
},
"Pyramids.Policy.LearningRate.mean": {
"value": 1.4765209364309457e-06,
"min": 1.4765209364309457e-06,
"max": 0.00029841125767243813,
"count": 100
},
"Pyramids.Policy.LearningRate.sum": {
"value": 2.067129311003324e-05,
"min": 2.067129311003324e-05,
"max": 0.0036757754747415664,
"count": 100
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10049214047619048,
"min": 0.10049214047619048,
"max": 0.19947041904761906,
"count": 100
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4068899666666668,
"min": 1.3962929333333334,
"max": 2.7074662000000003,
"count": 100
},
"Pyramids.Policy.Beta.mean": {
"value": 5.9164833571428365e-05,
"min": 5.9164833571428365e-05,
"max": 0.009947094862857142,
"count": 100
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0008283076699999971,
"min": 0.0008283076699999971,
"max": 0.12254331749,
"count": 100
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.003426314564421773,
"min": 0.0033297636546194553,
"max": 0.2763366103172302,
"count": 100
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.04796840250492096,
"min": 0.04661669209599495,
"max": 1.9343562126159668,
"count": 100
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 265.14406779661016,
"min": 227.2258064516129,
"max": 999.0,
"count": 100
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 31287.0,
"min": 16603.0,
"max": 33687.0,
"count": 100
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6859355781669334,
"min": -0.9999500517733395,
"max": 1.7707741815716989,
"count": 100
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 198.94039822369814,
"min": -31.998401656746864,
"max": 227.8107985854149,
"count": 100
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6859355781669334,
"min": -0.9999500517733395,
"max": 1.7707741815716989,
"count": 100
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 198.94039822369814,
"min": -31.998401656746864,
"max": 227.8107985854149,
"count": 100
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.009489890193219901,
"min": 0.008390802981703319,
"max": 5.34306164918577,
"count": 100
},
"Pyramids.Policy.RndReward.sum": {
"value": 1.1198070427999482,
"min": 1.0152871607861016,
"max": 90.83204803615808,
"count": 100
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 100
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 100
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1657461670",
"python_version": "3.9.5 (default, Nov 23 2021, 15:27:38) \n[GCC 9.3.0]",
"command_line_arguments": "/local_disk0/.ephemeral_nfs/envs/pythonEnv-fb4aed1f-7c07-4ca5-b819-aeecdc6ccb9f/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./trained-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids_Training --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.20.3",
"end_time_seconds": "1657466363"
},
"total": 4692.737395972001,
"count": 1,
"self": 0.2702085850014555,
"children": {
"run_training.setup": {
"total": 0.08080957099991792,
"count": 1,
"self": 0.08080957099991792
},
"TrainerController.start_learning": {
"total": 4692.386377815999,
"count": 1,
"self": 3.9103825940128445,
"children": {
"TrainerController._reset_env": {
"total": 12.672993666000366,
"count": 1,
"self": 12.672993666000366
},
"TrainerController.advance": {
"total": 4675.684551769987,
"count": 193767,
"self": 3.925852716826739,
"children": {
"env_step": {
"total": 2830.8489869019786,
"count": 193767,
"self": 2515.6984759872616,
"children": {
"SubprocessEnvManager._take_step": {
"total": 312.56576160497843,
"count": 193767,
"self": 12.271461569627718,
"children": {
"TorchPolicy.evaluate": {
"total": 300.2943000353507,
"count": 187568,
"self": 98.993900600497,
"children": {
"TorchPolicy.sample_actions": {
"total": 201.30039943485372,
"count": 187568,
"self": 201.30039943485372
}
}
}
}
},
"workers": {
"total": 2.584749309738527,
"count": 193767,
"self": 0.0,
"children": {
"worker_root": {
"total": 4685.897691749033,
"count": 193767,
"is_parallel": true,
"self": 2412.6613520708115,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.001852370000051451,
"count": 1,
"is_parallel": true,
"self": 0.0009680760003902833,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0008842939996611676,
"count": 8,
"is_parallel": true,
"self": 0.0008842939996611676
}
}
},
"UnityEnvironment.step": {
"total": 0.025506631000098423,
"count": 1,
"is_parallel": true,
"self": 0.00038378000044758664,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0002961400000458525,
"count": 1,
"is_parallel": true,
"self": 0.0002961400000458525
},
"communicator.exchange": {
"total": 0.023825390999718365,
"count": 1,
"is_parallel": true,
"self": 0.023825390999718365
},
"steps_from_proto": {
"total": 0.0010013199998866185,
"count": 1,
"is_parallel": true,
"self": 0.00023629300039829104,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0007650269994883274,
"count": 8,
"is_parallel": true,
"self": 0.0007650269994883274
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 2273.236339678221,
"count": 193766,
"is_parallel": true,
"self": 67.02309798906845,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 41.81772355412431,
"count": 193766,
"is_parallel": true,
"self": 41.81772355412431
},
"communicator.exchange": {
"total": 1991.822494931881,
"count": 193766,
"is_parallel": true,
"self": 1991.822494931881
},
"steps_from_proto": {
"total": 172.5730232031474,
"count": 193766,
"is_parallel": true,
"self": 40.4992663508242,
"children": {
"_process_rank_one_or_two_observation": {
"total": 132.07375685232319,
"count": 1550128,
"is_parallel": true,
"self": 132.07375685232319
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1840.9097121511813,
"count": 193767,
"self": 6.888272713986225,
"children": {
"process_trajectory": {
"total": 396.20563162719054,
"count": 193767,
"self": 395.4405441641902,
"children": {
"RLTrainer._checkpoint": {
"total": 0.7650874630003273,
"count": 6,
"self": 0.7650874630003273
}
}
},
"_update_policy": {
"total": 1437.8158078100046,
"count": 1381,
"self": 572.8118601500323,
"children": {
"TorchPPOOptimizer.update": {
"total": 865.0039476599723,
"count": 68415,
"self": 865.0039476599723
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.019997039809823e-07,
"count": 1,
"self": 8.019997039809823e-07
},
"TrainerController._save_models": {
"total": 0.11844898399976955,
"count": 1,
"self": 0.0008768910001890617,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11757209299958049,
"count": 1,
"self": 0.11757209299958049
}
}
}
}
}
}
}