{ "name": "root", "gauges": { "Pyramids.Policy.Entropy.mean": { "value": 0.45728620886802673, "min": 0.45728620886802673, "max": 1.4416059255599976, "count": 33 }, "Pyramids.Policy.Entropy.sum": { "value": 13689.3203125, "min": 13689.3203125, "max": 43732.55859375, "count": 33 }, "Pyramids.Step.mean": { "value": 989995.0, "min": 29914.0, "max": 989995.0, "count": 33 }, "Pyramids.Step.sum": { "value": 989995.0, "min": 29914.0, "max": 989995.0, "count": 33 }, "Pyramids.Policy.ExtrinsicValueEstimate.mean": { "value": 0.5068914294242859, "min": -0.11628410965204239, "max": 0.565357506275177, "count": 33 }, "Pyramids.Policy.ExtrinsicValueEstimate.sum": { "value": 141.92959594726562, "min": -28.024471282958984, "max": 154.907958984375, "count": 33 }, "Pyramids.Policy.RndValueEstimate.mean": { "value": 0.10321734100580215, "min": -0.040640491992235184, "max": 0.3118896484375, "count": 33 }, "Pyramids.Policy.RndValueEstimate.sum": { "value": 28.900856018066406, "min": -10.972932815551758, "max": 75.1654052734375, "count": 33 }, "Pyramids.Losses.PolicyLoss.mean": { "value": 0.07029747921502481, "min": 0.0663410385831175, "max": 0.07525485877573089, "count": 33 }, "Pyramids.Losses.PolicyLoss.sum": { "value": 1.0544621882253722, "min": 0.5767015421400685, "max": 1.0767432126061371, "count": 33 }, "Pyramids.Losses.ValueLoss.mean": { "value": 0.017238268237051196, "min": 0.000568888726809759, "max": 0.01851767444478006, "count": 33 }, "Pyramids.Losses.ValueLoss.sum": { "value": 0.2585740235557679, "min": 0.0062577759949073485, "max": 0.25924744222692087, "count": 33 }, "Pyramids.Policy.LearningRate.mean": { "value": 7.434857521746665e-06, "min": 7.434857521746665e-06, "max": 0.00029484427671857497, "count": 33 }, "Pyramids.Policy.LearningRate.sum": { "value": 0.00011152286282619997, "min": 0.00011152286282619997, "max": 0.0035071550309484, "count": 33 }, "Pyramids.Policy.Epsilon.mean": { "value": 0.10247825333333335, "min": 0.10247825333333335, "max": 0.198281425, "count": 33 }, "Pyramids.Policy.Epsilon.sum": { "value": 1.5371738000000004, "min": 1.4775001, "max": 2.5690515999999994, "count": 33 }, "Pyramids.Policy.Beta.mean": { "value": 0.00025757750799999997, "min": 0.00025757750799999997, "max": 0.0098283143575, "count": 33 }, "Pyramids.Policy.Beta.sum": { "value": 0.0038636626199999996, "min": 0.0038636626199999996, "max": 0.11692825484, "count": 33 }, "Pyramids.Losses.RNDLoss.mean": { "value": 0.013212502934038639, "min": 0.013212502934038639, "max": 0.4919722080230713, "count": 33 }, "Pyramids.Losses.RNDLoss.sum": { "value": 0.19818754494190216, "min": 0.19396045804023743, "max": 3.9357776641845703, "count": 33 }, "Pyramids.Environment.EpisodeLength.mean": { "value": 337.7294117647059, "min": 328.4942528735632, "max": 999.0, "count": 33 }, "Pyramids.Environment.EpisodeLength.sum": { "value": 28707.0, "min": 16457.0, "max": 33450.0, "count": 33 }, "Pyramids.Environment.CumulativeReward.mean": { "value": 1.59165880224284, "min": -0.9999375520274043, "max": 1.6253139321194139, "count": 33 }, "Pyramids.Environment.CumulativeReward.sum": { "value": 135.2909981906414, "min": -31.998001664876938, "max": 141.00119905918837, "count": 33 }, "Pyramids.Policy.ExtrinsicReward.mean": { "value": 1.59165880224284, "min": -0.9999375520274043, "max": 1.6253139321194139, "count": 33 }, "Pyramids.Policy.ExtrinsicReward.sum": { "value": 135.2909981906414, "min": -31.998001664876938, "max": 141.00119905918837, "count": 33 }, "Pyramids.Policy.RndReward.mean": { "value": 0.046841042914242924, "min": 
0.046841042914242924, "max": 10.017221994259778, "count": 33 }, "Pyramids.Policy.RndReward.sum": { "value": 3.9814886477106484, "min": 3.9814886477106484, "max": 170.29277390241623, "count": 33 }, "Pyramids.IsTraining.mean": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 33 }, "Pyramids.IsTraining.sum": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 33 } }, "metadata": { "timer_format_version": "0.1.0", "start_time_seconds": "1716546092", "python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]", "command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics", "mlagents_version": "1.1.0.dev0", "mlagents_envs_version": "1.1.0.dev0", "communication_protocol_version": "1.5.0", "pytorch_version": "2.3.0+cu121", "numpy_version": "1.23.5", "end_time_seconds": "1716548902" }, "total": 2810.32657578, "count": 1, "self": 0.6416263659998549, "children": { "run_training.setup": { "total": 0.068972891000044, "count": 1, "self": 0.068972891000044 }, "TrainerController.start_learning": { "total": 2809.615976523, "count": 1, "self": 1.952965131072233, "children": { "TrainerController._reset_env": { "total": 4.3222840829999996, "count": 1, "self": 4.3222840829999996 }, "TrainerController.advance": { "total": 2803.240112839928, "count": 63772, "self": 2.1416022639054972, "children": { "env_step": { "total": 2014.8691629780496, "count": 63772, "self": 1834.1915422299942, "children": { "SubprocessEnvManager._take_step": { "total": 179.43114572900492, "count": 63772, "self": 6.376321151017464, "children": { "TorchPolicy.evaluate": { "total": 173.05482457798746, "count": 62570, "self": 173.05482457798746 } } }, "workers": { "total": 1.2464750190505356, "count": 63772, "self": 0.0, "children": { "worker_root": { "total": 2802.644595833031, "count": 63772, "is_parallel": true, "self": 1132.1600464880175, "children": { "run_training.setup": { "total": 0.0, "count": 0, "is_parallel": true, "self": 0.0, "children": { "steps_from_proto": { "total": 0.010335770000097, "count": 1, "is_parallel": true, "self": 0.008174658000143609, "children": { "_process_rank_one_or_two_observation": { "total": 0.002161111999953391, "count": 8, "is_parallel": true, "self": 0.002161111999953391 } } }, "UnityEnvironment.step": { "total": 0.06490054500000042, "count": 1, "is_parallel": true, "self": 0.0007121190000134447, "children": { "UnityEnvironment._generate_step_input": { "total": 0.000535884999976588, "count": 1, "is_parallel": true, "self": 0.000535884999976588 }, "communicator.exchange": { "total": 0.0617812350000122, "count": 1, "is_parallel": true, "self": 0.0617812350000122 }, "steps_from_proto": { "total": 0.0018713059999981851, "count": 1, "is_parallel": true, "self": 0.0004494039999372035, "children": { "_process_rank_one_or_two_observation": { "total": 0.0014219020000609817, "count": 8, "is_parallel": true, "self": 0.0014219020000609817 } } } } } } }, "UnityEnvironment.step": { "total": 1670.4845493450134, "count": 63771, "is_parallel": true, "self": 45.270169335052515, "children": { "UnityEnvironment._generate_step_input": { "total": 27.267983590027598, "count": 63771, "is_parallel": true, "self": 27.267983590027598 }, "communicator.exchange": { "total": 1476.7229670189681, "count": 63771, "is_parallel": true, "self": 1476.7229670189681 }, "steps_from_proto": { "total": 121.22342940096519, "count": 63771, "is_parallel": true, "self": 26.08005731287369, "children": { 
"_process_rank_one_or_two_observation": { "total": 95.1433720880915, "count": 510168, "is_parallel": true, "self": 95.1433720880915 } } } } } } } } } } }, "trainer_advance": { "total": 786.2293475979726, "count": 63772, "self": 3.8372939029196687, "children": { "process_trajectory": { "total": 155.02467574205343, "count": 63772, "self": 154.73896617205332, "children": { "RLTrainer._checkpoint": { "total": 0.2857095700001082, "count": 2, "self": 0.2857095700001082 } } }, "_update_policy": { "total": 627.3673779529995, "count": 455, "self": 367.5116445869726, "children": { "TorchPPOOptimizer.update": { "total": 259.8557333660269, "count": 22809, "self": 259.8557333660269 } } } } } } }, "trainer_threads": { "total": 1.0819999261002522e-06, "count": 1, "self": 1.0819999261002522e-06 }, "TrainerController._save_models": { "total": 0.10061338700006672, "count": 1, "self": 0.0020008699998470547, "children": { "RLTrainer._checkpoint": { "total": 0.09861251700021967, "count": 1, "self": 0.09861251700021967 } } } } } } }