{ "name": "root", "gauges": { "Pyramids.Policy.Entropy.mean": { "value": 0.3333994746208191, "min": 0.3333994746208191, "max": 1.434740424156189, "count": 33 }, "Pyramids.Policy.Entropy.sum": { "value": 9991.3154296875, "min": 9991.3154296875, "max": 43524.28515625, "count": 33 }, "Pyramids.Step.mean": { "value": 989877.0, "min": 29915.0, "max": 989877.0, "count": 33 }, "Pyramids.Step.sum": { "value": 989877.0, "min": 29915.0, "max": 989877.0, "count": 33 }, "Pyramids.Policy.ExtrinsicValueEstimate.mean": { "value": 0.5442467331886292, "min": -0.08237089961767197, "max": 0.6127274632453918, "count": 33 }, "Pyramids.Policy.ExtrinsicValueEstimate.sum": { "value": 145.31387329101562, "min": -20.016128540039062, "max": 175.6463623046875, "count": 33 }, "Pyramids.Policy.RndValueEstimate.mean": { "value": -0.0011250348761677742, "min": -0.003884807461872697, "max": 0.3590558171272278, "count": 33 }, "Pyramids.Policy.RndValueEstimate.sum": { "value": -0.3003843128681183, "min": -1.1071701049804688, "max": 86.53245544433594, "count": 33 }, "Pyramids.Losses.PolicyLoss.mean": { "value": 0.06948406615324518, "min": 0.06416916676615883, "max": 0.0727442685962333, "count": 33 }, "Pyramids.Losses.PolicyLoss.sum": { "value": 0.9727769261454324, "min": 0.4934779911696103, "max": 1.0533140792098243, "count": 33 }, "Pyramids.Losses.ValueLoss.mean": { "value": 0.015369856923273034, "min": 0.0016885011700466794, "max": 0.01703454028063875, "count": 33 }, "Pyramids.Losses.ValueLoss.sum": { "value": 0.21517799692582248, "min": 0.018573512870513474, "max": 0.24812979035977703, "count": 33 }, "Pyramids.Policy.LearningRate.mean": { "value": 7.6028260371857134e-06, "min": 7.6028260371857134e-06, "max": 0.0002952345015885, "count": 33 }, "Pyramids.Policy.LearningRate.sum": { "value": 0.00010643956452059999, "min": 0.00010643956452059999, "max": 0.003383265272245, "count": 33 }, "Pyramids.Policy.Epsilon.mean": { "value": 0.10253424285714285, "min": 0.10253424285714285, "max": 0.1984115, "count": 33 }, "Pyramids.Policy.Epsilon.sum": { "value": 1.4354794, "min": 1.3888805, "max": 2.5727256000000005, "count": 33 }, "Pyramids.Policy.Beta.mean": { "value": 0.0002631708614285714, "min": 0.0002631708614285714, "max": 0.00984130885, "count": 33 }, "Pyramids.Policy.Beta.sum": { "value": 0.0036843920599999996, "min": 0.0036843920599999996, "max": 0.1128027245, "count": 33 }, "Pyramids.Losses.RNDLoss.mean": { "value": 0.015523058362305164, "min": 0.015523058362305164, "max": 0.5624565482139587, "count": 33 }, "Pyramids.Losses.RNDLoss.sum": { "value": 0.21732281148433685, "min": 0.21732281148433685, "max": 3.9371957778930664, "count": 33 }, "Pyramids.Environment.EpisodeLength.mean": { "value": 367.62820512820514, "min": 292.3883495145631, "max": 996.4, "count": 33 }, "Pyramids.Environment.EpisodeLength.sum": { "value": 28675.0, "min": 16714.0, "max": 32660.0, "count": 33 }, "Pyramids.Environment.CumulativeReward.mean": { "value": 1.5297589515073178, "min": -0.930666717638572, "max": 1.6507521547863018, "count": 33 }, "Pyramids.Environment.CumulativeReward.sum": { "value": 119.32119821757078, "min": -30.67740160226822, "max": 168.17899836599827, "count": 33 }, "Pyramids.Policy.ExtrinsicReward.mean": { "value": 1.5297589515073178, "min": -0.930666717638572, "max": 1.6507521547863018, "count": 33 }, "Pyramids.Policy.ExtrinsicReward.sum": { "value": 119.32119821757078, "min": -30.67740160226822, "max": 168.17899836599827, "count": 33 }, "Pyramids.Policy.RndReward.mean": { "value": 0.05917927180491507, "min": 0.04738622474143235, 
"max": 10.940652040874257, "count": 33 }, "Pyramids.Policy.RndReward.sum": { "value": 4.615983200783376, "min": 4.615983200783376, "max": 185.99108469486237, "count": 33 }, "Pyramids.IsTraining.mean": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 33 }, "Pyramids.IsTraining.sum": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 33 } }, "metadata": { "timer_format_version": "0.1.0", "start_time_seconds": "1695764057", "python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]", "command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics", "mlagents_version": "0.31.0.dev0", "mlagents_envs_version": "0.31.0.dev0", "communication_protocol_version": "1.5.0", "pytorch_version": "1.11.0+cu102", "numpy_version": "1.21.2", "end_time_seconds": "1695766511" }, "total": 2453.3441720309997, "count": 1, "self": 0.5282413029995041, "children": { "run_training.setup": { "total": 0.043107044999942445, "count": 1, "self": 0.043107044999942445 }, "TrainerController.start_learning": { "total": 2452.7728236830003, "count": 1, "self": 1.8033336689318276, "children": { "TrainerController._reset_env": { "total": 4.553713423999852, "count": 1, "self": 4.553713423999852 }, "TrainerController.advance": { "total": 2446.3169065820684, "count": 63920, "self": 1.6987317010552943, "children": { "env_step": { "total": 1755.709652899966, "count": 63920, "self": 1626.5032682610224, "children": { "SubprocessEnvManager._take_step": { "total": 128.16866709494207, "count": 63920, "self": 5.277063267004678, "children": { "TorchPolicy.evaluate": { "total": 122.89160382793739, "count": 62554, "self": 122.89160382793739 } } }, "workers": { "total": 1.0377175440014526, "count": 63920, "self": 0.0, "children": { "worker_root": { "total": 2446.395532611032, "count": 63920, "is_parallel": true, "self": 951.1587023530087, "children": { "run_training.setup": { "total": 0.0, "count": 0, "is_parallel": true, "self": 0.0, "children": { "steps_from_proto": { "total": 0.0017659809998349374, "count": 1, "is_parallel": true, "self": 0.0005599969997547305, "children": { "_process_rank_one_or_two_observation": { "total": 0.001205984000080207, "count": 8, "is_parallel": true, "self": 0.001205984000080207 } } }, "UnityEnvironment.step": { "total": 0.07597985999996126, "count": 1, "is_parallel": true, "self": 0.0006119990000570397, "children": { "UnityEnvironment._generate_step_input": { "total": 0.00047514999982922745, "count": 1, "is_parallel": true, "self": 0.00047514999982922745 }, "communicator.exchange": { "total": 0.0722329110001283, "count": 1, "is_parallel": true, "self": 0.0722329110001283 }, "steps_from_proto": { "total": 0.0026597999999466992, "count": 1, "is_parallel": true, "self": 0.0003412770001887111, "children": { "_process_rank_one_or_two_observation": { "total": 0.002318522999757988, "count": 8, "is_parallel": true, "self": 0.002318522999757988 } } } } } } }, "UnityEnvironment.step": { "total": 1495.2368302580235, "count": 63919, "is_parallel": true, "self": 36.52268037501176, "children": { "UnityEnvironment._generate_step_input": { "total": 24.067441398020947, "count": 63919, "is_parallel": true, "self": 24.067441398020947 }, "communicator.exchange": { "total": 1321.2240849269997, "count": 63919, "is_parallel": true, "self": 1321.2240849269997 }, "steps_from_proto": { "total": 113.42262355799107, "count": 63919, "is_parallel": true, "self": 23.155506479956557, "children": { 
"_process_rank_one_or_two_observation": { "total": 90.26711707803452, "count": 511352, "is_parallel": true, "self": 90.26711707803452 } } } } } } } } } } }, "trainer_advance": { "total": 688.908521981047, "count": 63920, "self": 3.335679834122402, "children": { "process_trajectory": { "total": 117.06676702192544, "count": 63920, "self": 116.8351561209256, "children": { "RLTrainer._checkpoint": { "total": 0.23161090099983994, "count": 2, "self": 0.23161090099983994 } } }, "_update_policy": { "total": 568.5060751249991, "count": 456, "self": 369.8169946920516, "children": { "TorchPPOOptimizer.update": { "total": 198.68908043294755, "count": 22803, "self": 198.68908043294755 } } } } } } }, "trainer_threads": { "total": 1.258999873243738e-06, "count": 1, "self": 1.258999873243738e-06 }, "TrainerController._save_models": { "total": 0.09886874900030307, "count": 1, "self": 0.001423507000254176, "children": { "RLTrainer._checkpoint": { "total": 0.09744524200004889, "count": 1, "self": 0.09744524200004889 } } } } } } }