{ "name": "root", "gauges": { "Pyramids.Policy.Entropy.mean": { "value": 0.2649247646331787, "min": 0.2649247646331787, "max": 1.4459623098373413, "count": 33 }, "Pyramids.Policy.Entropy.sum": { "value": 7939.265625, "min": 7939.265625, "max": 43864.7109375, "count": 33 }, "Pyramids.Step.mean": { "value": 989993.0, "min": 29933.0, "max": 989993.0, "count": 33 }, "Pyramids.Step.sum": { "value": 989993.0, "min": 29933.0, "max": 989993.0, "count": 33 }, "Pyramids.Policy.ExtrinsicValueEstimate.mean": { "value": 0.6377519965171814, "min": -0.09302067756652832, "max": 0.7391823530197144, "count": 33 }, "Pyramids.Policy.ExtrinsicValueEstimate.sum": { "value": 183.03482055664062, "min": -22.511003494262695, "max": 214.36288452148438, "count": 33 }, "Pyramids.Policy.RndValueEstimate.mean": { "value": 0.003492568386718631, "min": -0.010945435613393784, "max": 0.23402667045593262, "count": 33 }, "Pyramids.Policy.RndValueEstimate.sum": { "value": 1.0023671388626099, "min": -3.1194491386413574, "max": 56.63445281982422, "count": 33 }, "Pyramids.Losses.PolicyLoss.mean": { "value": 0.07157198179888331, "min": 0.06480402557644993, "max": 0.07170581122428565, "count": 33 }, "Pyramids.Losses.PolicyLoss.sum": { "value": 1.0735797269832497, "min": 0.5694369119647493, "max": 1.0735797269832497, "count": 33 }, "Pyramids.Losses.ValueLoss.mean": { "value": 0.01634289381779834, "min": 0.0014038295674312059, "max": 0.018017845771056, "count": 33 }, "Pyramids.Losses.ValueLoss.sum": { "value": 0.2451434072669751, "min": 0.01684595480917447, "max": 0.252249840794784, "count": 33 }, "Pyramids.Policy.LearningRate.mean": { "value": 7.513457495546667e-06, "min": 7.513457495546667e-06, "max": 0.0002948421392192875, "count": 33 }, "Pyramids.Policy.LearningRate.sum": { "value": 0.00011270186243320001, "min": 0.00011270186243320001, "max": 0.0036321517892827994, "count": 33 }, "Pyramids.Policy.Epsilon.mean": { "value": 0.10250445333333331, "min": 0.10250445333333331, "max": 0.1982807125, "count": 33 }, "Pyramids.Policy.Epsilon.sum": { "value": 1.5375667999999998, "min": 1.4779269000000002, "max": 2.6107172000000007, "count": 33 }, "Pyramids.Policy.Beta.mean": { "value": 0.000260194888, "min": 0.000260194888, "max": 0.00982824317875, "count": 33 }, "Pyramids.Policy.Beta.sum": { "value": 0.00390292332, "min": 0.00390292332, "max": 0.12109064828, "count": 33 }, "Pyramids.Losses.RNDLoss.mean": { "value": 0.013383041135966778, "min": 0.013383041135966778, "max": 0.4778611660003662, "count": 33 }, "Pyramids.Losses.RNDLoss.sum": { "value": 0.2007456123828888, "min": 0.2007456123828888, "max": 3.8228893280029297, "count": 33 }, "Pyramids.Environment.EpisodeLength.mean": { "value": 300.76, "min": 258.05405405405406, "max": 986.030303030303, "count": 33 }, "Pyramids.Environment.EpisodeLength.sum": { "value": 30076.0, "min": 16348.0, "max": 33569.0, "count": 33 }, "Pyramids.Environment.CumulativeReward.mean": { "value": 1.6792319916188716, "min": -0.9263091426004063, "max": 1.733620674317253, "count": 33 }, "Pyramids.Environment.CumulativeReward.sum": { "value": 167.92319916188717, "min": -30.568201705813408, "max": 201.09999822080135, "count": 33 }, "Pyramids.Policy.ExtrinsicReward.mean": { "value": 1.6792319916188716, "min": -0.9263091426004063, "max": 1.733620674317253, "count": 33 }, "Pyramids.Policy.ExtrinsicReward.sum": { "value": 167.92319916188717, "min": -30.568201705813408, "max": 201.09999822080135, "count": 33 }, "Pyramids.Policy.RndReward.mean": { "value": 0.041807469085033515, "min": 0.038200701576414274, "max": 
9.750102219336172, "count": 33 }, "Pyramids.Policy.RndReward.sum": { "value": 4.180746908503352, "min": 4.180746908503352, "max": 165.75173772871494, "count": 33 }, "Pyramids.IsTraining.mean": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 33 }, "Pyramids.IsTraining.sum": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 33 } }, "metadata": { "timer_format_version": "0.1.0", "start_time_seconds": "1718033059", "python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]", "command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics", "mlagents_version": "1.1.0.dev0", "mlagents_envs_version": "1.1.0.dev0", "communication_protocol_version": "1.5.0", "pytorch_version": "2.3.0+cu121", "numpy_version": "1.23.5", "end_time_seconds": "1718035608" }, "total": 2548.822866124, "count": 1, "self": 0.4814605120000124, "children": { "run_training.setup": { "total": 0.051430090999929234, "count": 1, "self": 0.051430090999929234 }, "TrainerController.start_learning": { "total": 2548.289975521, "count": 1, "self": 1.660366831997635, "children": { "TrainerController._reset_env": { "total": 2.2435048969996387, "count": 1, "self": 2.2435048969996387 }, "TrainerController.advance": { "total": 2544.301462038003, "count": 64311, "self": 1.7874230850197819, "children": { "env_step": { "total": 1887.9516203970206, "count": 64311, "self": 1735.118529123064, "children": { "SubprocessEnvManager._take_step": { "total": 151.79271454305308, "count": 64311, "self": 5.4698654800245095, "children": { "TorchPolicy.evaluate": { "total": 146.32284906302857, "count": 62559, "self": 146.32284906302857 } } }, "workers": { "total": 1.0403767309035175, "count": 64311, "self": 0.0, "children": { "worker_root": { "total": 2542.546178495889, "count": 64311, "is_parallel": true, "self": 946.4521659428151, "children": { "run_training.setup": { "total": 0.0, "count": 0, "is_parallel": true, "self": 0.0, "children": { "steps_from_proto": { "total": 0.002909234000071592, "count": 1, "is_parallel": true, "self": 0.0009579420002410188, "children": { "_process_rank_one_or_two_observation": { "total": 0.0019512919998305733, "count": 8, "is_parallel": true, "self": 0.0019512919998305733 } } }, "UnityEnvironment.step": { "total": 0.05080635100011932, "count": 1, "is_parallel": true, "self": 0.0006137309992482187, "children": { "UnityEnvironment._generate_step_input": { "total": 0.00046726700020371936, "count": 1, "is_parallel": true, "self": 0.00046726700020371936 }, "communicator.exchange": { "total": 0.04806707100033236, "count": 1, "is_parallel": true, "self": 0.04806707100033236 }, "steps_from_proto": { "total": 0.0016582820003350207, "count": 1, "is_parallel": true, "self": 0.00034899800084531307, "children": { "_process_rank_one_or_two_observation": { "total": 0.0013092839994897076, "count": 8, "is_parallel": true, "self": 0.0013092839994897076 } } } } } } }, "UnityEnvironment.step": { "total": 1596.0940125530738, "count": 64310, "is_parallel": true, "self": 37.86749360003887, "children": { "UnityEnvironment._generate_step_input": { "total": 25.804681709944816, "count": 64310, "is_parallel": true, "self": 25.804681709944816 }, "communicator.exchange": { "total": 1423.8839873461134, "count": 64310, "is_parallel": true, "self": 1423.8839873461134 }, "steps_from_proto": { "total": 108.5378498969767, "count": 64310, "is_parallel": true, "self": 22.887701857774573, "children": { 
"_process_rank_one_or_two_observation": { "total": 85.65014803920212, "count": 514480, "is_parallel": true, "self": 85.65014803920212 } } } } } } } } } } }, "trainer_advance": { "total": 654.5624185559627, "count": 64311, "self": 3.2653390199352543, "children": { "process_trajectory": { "total": 136.51986482003213, "count": 64311, "self": 136.31174120903233, "children": { "RLTrainer._checkpoint": { "total": 0.2081236109997917, "count": 2, "self": 0.2081236109997917 } } }, "_update_policy": { "total": 514.7772147159953, "count": 458, "self": 304.05304449302275, "children": { "TorchPPOOptimizer.update": { "total": 210.7241702229726, "count": 22758, "self": 210.7241702229726 } } } } } } }, "trainer_threads": { "total": 9.149998732027598e-07, "count": 1, "self": 9.149998732027598e-07 }, "TrainerController._save_models": { "total": 0.08464083899980324, "count": 1, "self": 0.0013628069991682423, "children": { "RLTrainer._checkpoint": { "total": 0.083278032000635, "count": 1, "self": 0.083278032000635 } } } } } } }