{ "name": "root", "gauges": { "Pyramids.Policy.Entropy.mean": { "value": 0.3238077461719513, "min": 0.30464068055152893, "max": 1.384185552597046, "count": 33 }, "Pyramids.Policy.Entropy.sum": { "value": 9740.13671875, "min": 9124.59765625, "max": 41990.65234375, "count": 33 }, "Pyramids.Step.mean": { "value": 989950.0, "min": 29952.0, "max": 989950.0, "count": 33 }, "Pyramids.Step.sum": { "value": 989950.0, "min": 29952.0, "max": 989950.0, "count": 33 }, "Pyramids.Policy.ExtrinsicValueEstimate.mean": { "value": 0.5838682055473328, "min": -0.09767741709947586, "max": 0.6215424537658691, "count": 33 }, "Pyramids.Policy.ExtrinsicValueEstimate.sum": { "value": 163.48309326171875, "min": -23.149547576904297, "max": 172.78880310058594, "count": 33 }, "Pyramids.Policy.RndValueEstimate.mean": { "value": 0.029793789610266685, "min": -0.003969848155975342, "max": 0.5246034860610962, "count": 33 }, "Pyramids.Policy.RndValueEstimate.sum": { "value": 8.34226131439209, "min": -1.1155273914337158, "max": 124.33102416992188, "count": 33 }, "Pyramids.Losses.PolicyLoss.mean": { "value": 0.06607722899380385, "min": 0.06322968994532453, "max": 0.0752871887250674, "count": 33 }, "Pyramids.Losses.PolicyLoss.sum": { "value": 0.9250812059132538, "min": 0.5021530170777769, "max": 1.129307830876011, "count": 33 }, "Pyramids.Losses.ValueLoss.mean": { "value": 0.016124860695886086, "min": 0.0005738098474814362, "max": 0.018259115102929564, "count": 33 }, "Pyramids.Losses.ValueLoss.sum": { "value": 0.22574804974240523, "min": 0.0063119083222957974, "max": 0.2482711419773598, "count": 33 }, "Pyramids.Policy.LearningRate.mean": { "value": 7.5734689041142845e-06, "min": 7.5734689041142845e-06, "max": 0.00029515063018788575, "count": 33 }, "Pyramids.Policy.LearningRate.sum": { "value": 0.00010602856465759998, "min": 0.00010602856465759998, "max": 0.0036337147887617995, "count": 33 }, "Pyramids.Policy.Epsilon.mean": { "value": 0.10252445714285714, "min": 0.10252445714285714, "max": 0.19838354285714285, "count": 33 }, "Pyramids.Policy.Epsilon.sum": { "value": 1.4353424, "min": 1.3886848, "max": 2.6112382000000003, "count": 33 }, "Pyramids.Policy.Beta.mean": { "value": 0.0002621932685714286, "min": 0.0002621932685714286, "max": 0.00983851593142857, "count": 33 }, "Pyramids.Policy.Beta.sum": { "value": 0.0036707057600000002, "min": 0.0036707057600000002, "max": 0.12114269618, "count": 33 }, "Pyramids.Losses.RNDLoss.mean": { "value": 0.049379862844944, "min": 0.04845596104860306, "max": 0.7750002145767212, "count": 33 }, "Pyramids.Losses.RNDLoss.sum": { "value": 0.6913180947303772, "min": 0.678383469581604, "max": 5.425001621246338, "count": 33 }, "Pyramids.Environment.EpisodeLength.mean": { "value": 345.13483146067415, "min": 308.3707865168539, "max": 999.0, "count": 33 }, "Pyramids.Environment.EpisodeLength.sum": { "value": 30717.0, "min": 15984.0, "max": 32908.0, "count": 33 }, "Pyramids.Environment.CumulativeReward.mean": { "value": 1.4975055959284975, "min": -1.0000000521540642, "max": 1.665387734542696, "count": 33 }, "Pyramids.Environment.CumulativeReward.sum": { "value": 133.27799803763628, "min": -31.99760165810585, "max": 163.2079979851842, "count": 33 }, "Pyramids.Policy.ExtrinsicReward.mean": { "value": 1.4975055959284975, "min": -1.0000000521540642, "max": 1.665387734542696, "count": 33 }, "Pyramids.Policy.ExtrinsicReward.sum": { "value": 133.27799803763628, "min": -31.99760165810585, "max": 163.2079979851842, "count": 33 }, "Pyramids.Policy.RndReward.mean": { "value": 0.17290613018244433, "min": 
0.16724884639411502, "max": 16.172629699110985, "count": 33 }, "Pyramids.Policy.RndReward.sum": { "value": 15.388645586237544, "min": 15.08791030064458, "max": 258.76207518577576, "count": 33 }, "Pyramids.IsTraining.mean": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 33 }, "Pyramids.IsTraining.sum": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 33 } }, "metadata": { "timer_format_version": "0.1.0", "start_time_seconds": "1704005800", "python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]", "command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics", "mlagents_version": "1.1.0.dev0", "mlagents_envs_version": "1.1.0.dev0", "communication_protocol_version": "1.5.0", "pytorch_version": "2.1.2+cu121", "numpy_version": "1.23.5", "end_time_seconds": "1704008244" }, "total": 2443.5232975890003, "count": 1, "self": 0.5954386670005078, "children": { "run_training.setup": { "total": 0.055205353000019386, "count": 1, "self": 0.055205353000019386 }, "TrainerController.start_learning": { "total": 2442.872653569, "count": 1, "self": 1.9799203779671188, "children": { "TrainerController._reset_env": { "total": 2.870515118000185, "count": 1, "self": 2.870515118000185 }, "TrainerController.advance": { "total": 2437.892613699032, "count": 63996, "self": 1.9108032131066466, "children": { "env_step": { "total": 1761.660532516033, "count": 63996, "self": 1603.308490314998, "children": { "SubprocessEnvManager._take_step": { "total": 157.15914342400583, "count": 63996, "self": 5.668157140059066, "children": { "TorchPolicy.evaluate": { "total": 151.49098628394677, "count": 62557, "self": 151.49098628394677 } } }, "workers": { "total": 1.1928987770293134, "count": 63996, "self": 0.0, "children": { "worker_root": { "total": 2436.475573560971, "count": 63996, "is_parallel": true, "self": 976.8024616489952, "children": { "run_training.setup": { "total": 0.0, "count": 0, "is_parallel": true, "self": 0.0, "children": { "steps_from_proto": { "total": 0.001827056000365701, "count": 1, "is_parallel": true, "self": 0.0005591039998762426, "children": { "_process_rank_one_or_two_observation": { "total": 0.0012679520004894584, "count": 8, "is_parallel": true, "self": 0.0012679520004894584 } } }, "UnityEnvironment.step": { "total": 0.07839620400000058, "count": 1, "is_parallel": true, "self": 0.0006100149994381354, "children": { "UnityEnvironment._generate_step_input": { "total": 0.0004202600002827239, "count": 1, "is_parallel": true, "self": 0.0004202600002827239 }, "communicator.exchange": { "total": 0.07519608100028563, "count": 1, "is_parallel": true, "self": 0.07519608100028563 }, "steps_from_proto": { "total": 0.00216984799999409, "count": 1, "is_parallel": true, "self": 0.00035325300041222363, "children": { "_process_rank_one_or_two_observation": { "total": 0.0018165949995818664, "count": 8, "is_parallel": true, "self": 0.0018165949995818664 } } } } } } }, "UnityEnvironment.step": { "total": 1459.6731119119759, "count": 63995, "is_parallel": true, "self": 39.519646650917366, "children": { "UnityEnvironment._generate_step_input": { "total": 26.273796364007012, "count": 63995, "is_parallel": true, "self": 26.273796364007012 }, "communicator.exchange": { "total": 1283.8651440250578, "count": 63995, "is_parallel": true, "self": 1283.8651440250578 }, "steps_from_proto": { "total": 110.01452487199367, "count": 63995, "is_parallel": true, "self": 23.71707621517635, "children": { 
"_process_rank_one_or_two_observation": { "total": 86.29744865681732, "count": 511960, "is_parallel": true, "self": 86.29744865681732 } } } } } } } } } } }, "trainer_advance": { "total": 674.3212779698924, "count": 63996, "self": 3.6119213008855695, "children": { "process_trajectory": { "total": 137.89328357299382, "count": 63996, "self": 137.65331308799387, "children": { "RLTrainer._checkpoint": { "total": 0.239970484999958, "count": 2, "self": 0.239970484999958 } } }, "_update_policy": { "total": 532.816073096013, "count": 453, "self": 315.8378356030812, "children": { "TorchPPOOptimizer.update": { "total": 216.97823749293184, "count": 22788, "self": 216.97823749293184 } } } } } } }, "trainer_threads": { "total": 1.0920002750935964e-06, "count": 1, "self": 1.0920002750935964e-06 }, "TrainerController._save_models": { "total": 0.12960328200006188, "count": 1, "self": 0.002406681000138633, "children": { "RLTrainer._checkpoint": { "total": 0.12719660099992325, "count": 1, "self": 0.12719660099992325 } } } } } } }