{ "name": "root", "gauges": { "Pyramids.Policy.Entropy.mean": { "value": 0.3776736259460449, "min": 0.3776736259460449, "max": 1.2476093769073486, "count": 20 }, "Pyramids.Policy.Entropy.sum": { "value": 18998.494140625, "min": 18998.494140625, "max": 62440.35546875, "count": 20 }, "Pyramids.Step.mean": { "value": 999874.0, "min": 49920.0, "max": 999874.0, "count": 20 }, "Pyramids.Step.sum": { "value": 999874.0, "min": 49920.0, "max": 999874.0, "count": 20 }, "Pyramids.Policy.ExtrinsicValueEstimate.mean": { "value": 0.6256685256958008, "min": -0.06844426691532135, "max": 0.672815203666687, "count": 20 }, "Pyramids.Policy.ExtrinsicValueEstimate.sum": { "value": 292.8128662109375, "min": -27.51459503173828, "max": 322.9512939453125, "count": 20 }, "Pyramids.Policy.RndValueEstimate.mean": { "value": 0.018741514533758163, "min": -0.0005381633527576923, "max": 0.3810485899448395, "count": 20 }, "Pyramids.Policy.RndValueEstimate.sum": { "value": 8.771028518676758, "min": -0.2561657428741455, "max": 152.03839111328125, "count": 20 }, "Pyramids.Losses.PolicyLoss.mean": { "value": 0.0697640247633578, "min": 0.06765343706701613, "max": 0.07579705501969632, "count": 20 }, "Pyramids.Losses.PolicyLoss.sum": { "value": 1.6743365943205872, "min": 0.9095646602363558, "max": 1.7159414038070084, "count": 20 }, "Pyramids.Losses.ValueLoss.mean": { "value": 0.01741114089500294, "min": 0.002489177529720798, "max": 0.01741114089500294, "count": 20 }, "Pyramids.Losses.ValueLoss.sum": { "value": 0.4178673814800705, "min": 0.04729437306469516, "max": 0.4178673814800705, "count": 20 }, "Pyramids.Policy.LearningRate.mean": { "value": 7.3296975567999975e-06, "min": 7.3296975567999975e-06, "max": 0.00029215680261440003, "count": 20 }, "Pyramids.Policy.LearningRate.sum": { "value": 0.00017591274136319993, "min": 0.00017591274136319993, "max": 0.006032718889093801, "count": 20 }, "Pyramids.Policy.Epsilon.mean": { "value": 0.1024432, "min": 0.1024432, "max": 0.19738560000000002, "count": 20 }, "Pyramids.Policy.Epsilon.sum": { "value": 2.4586368, "min": 2.3686272, "max": 4.3109062, "count": 20 }, "Pyramids.Policy.Beta.mean": { "value": 0.0002540756799999999, "min": 0.0002540756799999999, "max": 0.00973882144, "count": 20 }, "Pyramids.Policy.Beta.sum": { "value": 0.0060978163199999985, "min": 0.0060978163199999985, "max": 0.20111952938000002, "count": 20 }, "Pyramids.Losses.RNDLoss.mean": { "value": 0.0150672048330307, "min": 0.014632516540586948, "max": 0.37728455662727356, "count": 20 }, "Pyramids.Losses.RNDLoss.sum": { "value": 0.3616129159927368, "min": 0.35118040442466736, "max": 4.527414798736572, "count": 20 }, "Pyramids.Environment.EpisodeLength.mean": { "value": 294.874251497006, "min": 276.92090395480227, "max": 999.0, "count": 20 }, "Pyramids.Environment.EpisodeLength.sum": { "value": 49244.0, "min": 46491.0, "max": 53316.0, "count": 20 }, "Pyramids.Environment.CumulativeReward.mean": { "value": 1.645226325862065, "min": -1.0000000521540642, "max": 1.6778700413171854, "count": 20 }, "Pyramids.Environment.CumulativeReward.sum": { "value": 274.75279641896486, "min": -48.00000250339508, "max": 296.9829973131418, "count": 20 }, "Pyramids.Policy.ExtrinsicReward.mean": { "value": 1.645226325862065, "min": -1.0000000521540642, "max": 1.6778700413171854, "count": 20 }, "Pyramids.Policy.ExtrinsicReward.sum": { "value": 274.75279641896486, "min": -48.00000250339508, "max": 296.9829973131418, "count": 20 }, "Pyramids.Policy.RndReward.mean": { "value": 0.04574964659164442, "min": 0.04574964659164442, "max": 
5.083442655702432, "count": 20 }, "Pyramids.Policy.RndReward.sum": { "value": 7.6401909808046184, "min": 7.538699409909896, "max": 244.00524747371674, "count": 20 }, "Pyramids.IsTraining.mean": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 20 }, "Pyramids.IsTraining.sum": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 20 } }, "metadata": { "timer_format_version": "0.1.0", "start_time_seconds": "1657728190", "python_version": "3.7.13 (default, Apr 24 2022, 01:04:09) \n[GCC 7.5.0]", "command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./trained-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids_Training1 --no-graphics", "mlagents_version": "0.29.0.dev0", "mlagents_envs_version": "0.29.0.dev0", "communication_protocol_version": "1.5.0", "pytorch_version": "1.8.1+cu102", "numpy_version": "1.21.6", "end_time_seconds": "1657731909" }, "total": 3719.51996703, "count": 1, "self": 0.5429586899999777, "children": { "run_training.setup": { "total": 0.08578315099975953, "count": 1, "self": 0.08578315099975953 }, "TrainerController.start_learning": { "total": 3718.891225189, "count": 1, "self": 2.896064432078674, "children": { "TrainerController._reset_env": { "total": 5.709341595999831, "count": 1, "self": 5.709341595999831 }, "TrainerController.advance": { "total": 3710.1862332859214, "count": 64352, "self": 3.083193245041457, "children": { "env_step": { "total": 2345.712429656001, "count": 64352, "self": 2182.784042892131, "children": { "SubprocessEnvManager._take_step": { "total": 161.2830565309241, "count": 64352, "self": 8.515540034939932, "children": { "TorchPolicy.evaluate": { "total": 152.76751649598418, "count": 62570, "self": 34.58320609797693, "children": { "TorchPolicy.sample_actions": { "total": 118.18431039800726, "count": 62570, "self": 118.18431039800726 } } } } }, "workers": { "total": 1.6453302329459802, "count": 64352, "self": 0.0, "children": { "worker_root": { "total": 3711.5316603228575, "count": 64352, "is_parallel": true, "self": 1719.0149781576533, "children": { "run_training.setup": { "total": 0.0, "count": 0, "is_parallel": true, "self": 0.0, "children": { "steps_from_proto": { "total": 0.0033231490001526254, "count": 1, "is_parallel": true, "self": 0.0012802779997400648, "children": { "_process_rank_one_or_two_observation": { "total": 0.0020428710004125605, "count": 8, "is_parallel": true, "self": 0.0020428710004125605 } } }, "UnityEnvironment.step": { "total": 0.08057971700009148, "count": 1, "is_parallel": true, "self": 0.0006640510000579525, "children": { "UnityEnvironment._generate_step_input": { "total": 0.0005635749998873507, "count": 1, "is_parallel": true, "self": 0.0005635749998873507 }, "communicator.exchange": { "total": 0.07720498900016537, "count": 1, "is_parallel": true, "self": 0.07720498900016537 }, "steps_from_proto": { "total": 0.002147101999980805, "count": 1, "is_parallel": true, "self": 0.000598724999690603, "children": { "_process_rank_one_or_two_observation": { "total": 0.001548377000290202, "count": 8, "is_parallel": true, "self": 0.001548377000290202 } } } } } } }, "UnityEnvironment.step": { "total": 1992.5166821652042, "count": 64351, "is_parallel": true, "self": 44.97937237126962, "children": { "UnityEnvironment._generate_step_input": { "total": 28.048393197027963, "count": 64351, "is_parallel": true, "self": 28.048393197027963 }, "communicator.exchange": { "total": 1782.6700106720255, "count": 64351, "is_parallel": true, "self": 1782.6700106720255 }, "steps_from_proto": { "total": 
136.81890592488116, "count": 64351, "is_parallel": true, "self": 37.6737201008591, "children": { "_process_rank_one_or_two_observation": { "total": 99.14518582402206, "count": 514808, "is_parallel": true, "self": 99.14518582402206 } } } } } } } } } } }, "trainer_advance": { "total": 1361.390610384879, "count": 64352, "self": 6.0017889249479595, "children": { "process_trajectory": { "total": 251.1647456229298, "count": 64352, "self": 249.97967827492994, "children": { "RLTrainer._checkpoint": { "total": 1.1850673479998477, "count": 10, "self": 1.1850673479998477 } } }, "_update_policy": { "total": 1104.2240758370012, "count": 456, "self": 277.4872305620506, "children": { "TorchPPOOptimizer.update": { "total": 826.7368452749506, "count": 22851, "self": 826.7368452749506 } } } } } } }, "trainer_threads": { "total": 1.3480002962751314e-06, "count": 1, "self": 1.3480002962751314e-06 }, "TrainerController._save_models": { "total": 0.09958452699993359, "count": 1, "self": 0.002081748999444244, "children": { "RLTrainer._checkpoint": { "total": 0.09750277800048934, "count": 1, "self": 0.09750277800048934 } } } } } } }