{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.1519065648317337,
"min": 0.146709606051445,
"max": 1.4485841989517212,
"count": 100
},
"Pyramids.Policy.Entropy.sum": {
"value": 4532.89208984375,
"min": 4373.1201171875,
"max": 43944.25,
"count": 100
},
"Pyramids.Step.mean": {
"value": 2999916.0,
"min": 29952.0,
"max": 2999916.0,
"count": 100
},
"Pyramids.Step.sum": {
"value": 2999916.0,
"min": 29952.0,
"max": 2999916.0,
"count": 100
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.8808816075325012,
"min": -0.24982436001300812,
"max": 0.8808816075325012,
"count": 100
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 263.38360595703125,
"min": -59.2083740234375,
"max": 263.38360595703125,
"count": 100
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.0031910657417029142,
"min": -0.06275332719087601,
"max": 0.551007866859436,
"count": 100
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 0.9541286826133728,
"min": -18.323970794677734,
"max": 130.5888671875,
"count": 100
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06667955407049574,
"min": 0.06213645751579732,
"max": 0.07380040665950446,
"count": 100
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9335137569869404,
"min": 0.5166028466165312,
"max": 1.0914608400392658,
"count": 100
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.014350790452832978,
"min": 0.0011039727543285683,
"max": 0.015871432327133207,
"count": 100
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.2009110663396617,
"min": 0.012143700297614251,
"max": 0.22675631587067163,
"count": 100
},
"Pyramids.Policy.LearningRate.mean": {
"value": 1.543663771192862e-06,
"min": 1.543663771192862e-06,
"max": 0.00029838354339596195,
"count": 100
},
"Pyramids.Policy.LearningRate.sum": {
"value": 2.1611292796700067e-05,
"min": 2.1611292796700067e-05,
"max": 0.004053203448932199,
"count": 100
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10051452142857144,
"min": 0.10051452142857144,
"max": 0.19946118095238097,
"count": 100
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4072033000000002,
"min": 1.3962282666666668,
"max": 2.8125069,
"count": 100
},
"Pyramids.Policy.Beta.mean": {
"value": 6.140069071428589e-05,
"min": 6.140069071428589e-05,
"max": 0.009946171977142856,
"count": 100
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0008596096700000024,
"min": 0.0008596096700000024,
"max": 0.13511167322,
"count": 100
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.007030579727143049,
"min": 0.006488851271569729,
"max": 0.5527361631393433,
"count": 100
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.09842811524868011,
"min": 0.09084391593933105,
"max": 3.8691532611846924,
"count": 100
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 203.14285714285714,
"min": 203.14285714285714,
"max": 999.0,
"count": 100
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 27018.0,
"min": 15984.0,
"max": 32681.0,
"count": 100
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.781812019534129,
"min": -1.0000000521540642,
"max": 1.7917625759359743,
"count": 100
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 236.98099859803915,
"min": -30.71260165423155,
"max": 249.05499805510044,
"count": 100
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.781812019534129,
"min": -1.0000000521540642,
"max": 1.7917625759359743,
"count": 100
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 236.98099859803915,
"min": -30.71260165423155,
"max": 249.05499805510044,
"count": 100
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.014713893364633511,
"min": 0.014713893364633511,
"max": 10.769489921629429,
"count": 100
},
"Pyramids.Policy.RndReward.sum": {
"value": 1.956947817496257,
"min": 1.956947817496257,
"max": 172.31183874607086,
"count": 100
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 100
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 100
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1663652908",
"python_version": "3.7.14 (default, Sep 8 2022, 00:06:44) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./trained-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1663659732"
},
"total": 6823.655828139001,
"count": 1,
"self": 0.4380322620008883,
"children": {
"run_training.setup": {
"total": 0.052285955000002104,
"count": 1,
"self": 0.052285955000002104
},
"TrainerController.start_learning": {
"total": 6823.165509922,
"count": 1,
"self": 4.322114260139642,
"children": {
"TrainerController._reset_env": {
"total": 10.424575584000024,
"count": 1,
"self": 10.424575584000024
},
"TrainerController.advance": {
"total": 6808.32239480286,
"count": 194887,
"self": 4.370861308726489,
"children": {
"env_step": {
"total": 4702.866568041112,
"count": 194887,
"self": 4382.213663380145,
"children": {
"SubprocessEnvManager._take_step": {
"total": 318.3032546269897,
"count": 194887,
"self": 13.959708231118839,
"children": {
"TorchPolicy.evaluate": {
"total": 304.3435463958709,
"count": 187549,
"self": 104.02258748871799,
"children": {
"TorchPolicy.sample_actions": {
"total": 200.32095890715289,
"count": 187549,
"self": 200.32095890715289
}
}
}
}
},
"workers": {
"total": 2.34965003397798,
"count": 194887,
"self": 0.0,
"children": {
"worker_root": {
"total": 6810.354721428062,
"count": 194887,
"is_parallel": true,
"self": 2732.7685380851203,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005749642000012045,
"count": 1,
"is_parallel": true,
"self": 0.004516225999964263,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012334160000477823,
"count": 8,
"is_parallel": true,
"self": 0.0012334160000477823
}
}
},
"UnityEnvironment.step": {
"total": 0.04576541599999473,
"count": 1,
"is_parallel": true,
"self": 0.000515414000005876,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00042740700001786536,
"count": 1,
"is_parallel": true,
"self": 0.00042740700001786536
},
"communicator.exchange": {
"total": 0.04315310699996644,
"count": 1,
"is_parallel": true,
"self": 0.04315310699996644
},
"steps_from_proto": {
"total": 0.0016694880000045487,
"count": 1,
"is_parallel": true,
"self": 0.0004220439999471637,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001247444000057385,
"count": 8,
"is_parallel": true,
"self": 0.001247444000057385
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 4077.5861833429412,
"count": 194886,
"is_parallel": true,
"self": 86.08284411611112,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 69.172400512845,
"count": 194886,
"is_parallel": true,
"self": 69.172400512845
},
"communicator.exchange": {
"total": 3642.0390962290085,
"count": 194886,
"is_parallel": true,
"self": 3642.0390962290085
},
"steps_from_proto": {
"total": 280.2918424849766,
"count": 194886,
"is_parallel": true,
"self": 70.4321200502464,
"children": {
"_process_rank_one_or_two_observation": {
"total": 209.8597224347302,
"count": 1559088,
"is_parallel": true,
"self": 209.8597224347302
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 2101.0849654530216,
"count": 194887,
"self": 8.188185902986788,
"children": {
"process_trajectory": {
"total": 483.9639206030234,
"count": 194887,
"self": 483.38015089802394,
"children": {
"RLTrainer._checkpoint": {
"total": 0.5837697049994404,
"count": 6,
"self": 0.5837697049994404
}
}
},
"_update_policy": {
"total": 1608.9328589470115,
"count": 1401,
"self": 634.2073622789515,
"children": {
"TorchPPOOptimizer.update": {
"total": 974.72549666806,
"count": 68358,
"self": 974.72549666806
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.1209995136596262e-06,
"count": 1,
"self": 1.1209995136596262e-06
},
"TrainerController._save_models": {
"total": 0.09642415400048776,
"count": 1,
"self": 0.0016449480008304818,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09477920599965728,
"count": 1,
"self": 0.09477920599965728
}
}
}
}
}
}
}