{ "name": "root", "gauges": { "Pyramids.Policy.Entropy.mean": { "value": 0.41532936692237854, "min": 0.41532936692237854, "max": 1.4610974788665771, "count": 33 }, "Pyramids.Policy.Entropy.sum": { "value": 12612.72265625, "min": 12612.72265625, "max": 44323.8515625, "count": 33 }, "Pyramids.Step.mean": { "value": 989997.0, "min": 29952.0, "max": 989997.0, "count": 33 }, "Pyramids.Step.sum": { "value": 989997.0, "min": 29952.0, "max": 989997.0, "count": 33 }, "Pyramids.Policy.ExtrinsicValueEstimate.mean": { "value": 0.22085046768188477, "min": -0.16713948547840118, "max": 0.22085046768188477, "count": 33 }, "Pyramids.Policy.ExtrinsicValueEstimate.sum": { "value": 56.5377197265625, "min": -39.612056732177734, "max": 56.5377197265625, "count": 33 }, "Pyramids.Policy.RndValueEstimate.mean": { "value": 0.07109145820140839, "min": 0.007225059904158115, "max": 0.4674621522426605, "count": 33 }, "Pyramids.Policy.RndValueEstimate.sum": { "value": 18.199413299560547, "min": 1.7845897674560547, "max": 110.78852844238281, "count": 33 }, "Pyramids.Losses.PolicyLoss.mean": { "value": 0.07047669774536709, "min": 0.06543724591640301, "max": 0.07401030313519928, "count": 33 }, "Pyramids.Losses.PolicyLoss.sum": { "value": 0.9866737684351392, "min": 0.4839584918135052, "max": 1.0641597351150625, "count": 33 }, "Pyramids.Losses.ValueLoss.mean": { "value": 0.011617206489007341, "min": 0.00018257017461247477, "max": 0.011617206489007341, "count": 33 }, "Pyramids.Losses.ValueLoss.sum": { "value": 0.16264089084610278, "min": 0.0012779912222873233, "max": 0.16264089084610278, "count": 33 }, "Pyramids.Policy.LearningRate.mean": { "value": 7.612276034035713e-06, "min": 7.612276034035713e-06, "max": 0.00029515063018788575, "count": 33 }, "Pyramids.Policy.LearningRate.sum": { "value": 0.00010657186447649998, "min": 0.00010657186447649998, "max": 0.003127518357494, "count": 33 }, "Pyramids.Policy.Epsilon.mean": { "value": 0.10253739285714285, "min": 0.10253739285714285, "max": 0.19838354285714285, "count": 33 }, "Pyramids.Policy.Epsilon.sum": { "value": 1.4355235, "min": 1.327104, "max": 2.442506, "count": 33 }, "Pyramids.Policy.Beta.mean": { "value": 0.00026348554642857143, "min": 0.00026348554642857143, "max": 0.00983851593142857, "count": 33 }, "Pyramids.Policy.Beta.sum": { "value": 0.00368879765, "min": 0.00368879765, "max": 0.10428634939999999, "count": 33 }, "Pyramids.Losses.RNDLoss.mean": { "value": 0.011858736164867878, "min": 0.011858736164867878, "max": 0.4522216022014618, "count": 33 }, "Pyramids.Losses.RNDLoss.sum": { "value": 0.16602230072021484, "min": 0.16602230072021484, "max": 3.16555118560791, "count": 33 }, "Pyramids.Environment.EpisodeLength.mean": { "value": 609.68, "min": 609.68, "max": 999.0, "count": 33 }, "Pyramids.Environment.EpisodeLength.sum": { "value": 30484.0, "min": 15984.0, "max": 33922.0, "count": 33 }, "Pyramids.Environment.CumulativeReward.mean": { "value": 0.9101759646832943, "min": -1.0000000521540642, "max": 0.9101759646832943, "count": 33 }, "Pyramids.Environment.CumulativeReward.sum": { "value": 45.508798234164715, "min": -32.000001668930054, "max": 45.508798234164715, "count": 33 }, "Pyramids.Policy.ExtrinsicReward.mean": { "value": 0.9101759646832943, "min": -1.0000000521540642, "max": 0.9101759646832943, "count": 33 }, "Pyramids.Policy.ExtrinsicReward.sum": { "value": 45.508798234164715, "min": -32.000001668930054, "max": 45.508798234164715, "count": 33 }, "Pyramids.Policy.RndReward.mean": { "value": 0.07606960660777987, "min": 0.07606960660777987, "max": 8.988390937447548, 
"count": 33 }, "Pyramids.Policy.RndReward.sum": { "value": 3.803480330388993, "min": 3.736572267836891, "max": 143.81425499916077, "count": 33 }, "Pyramids.IsTraining.mean": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 33 }, "Pyramids.IsTraining.sum": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 33 } }, "metadata": { "timer_format_version": "0.1.0", "start_time_seconds": "1660088588", "python_version": "3.7.13 (default, Apr 24 2022, 01:04:09) \n[GCC 7.5.0]", "command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./trained-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics", "mlagents_version": "0.29.0.dev0", "mlagents_envs_version": "0.29.0.dev0", "communication_protocol_version": "1.5.0", "pytorch_version": "1.8.1+cu102", "numpy_version": "1.21.6", "end_time_seconds": "1660090551" }, "total": 1962.7637415150002, "count": 1, "self": 0.4828909350003414, "children": { "run_training.setup": { "total": 0.042640785000003234, "count": 1, "self": 0.042640785000003234 }, "TrainerController.start_learning": { "total": 1962.2382097949999, "count": 1, "self": 1.3612930209847036, "children": { "TrainerController._reset_env": { "total": 9.941774627999962, "count": 1, "self": 9.941774627999962 }, "TrainerController.advance": { "total": 1950.8428995670147, "count": 63366, "self": 1.4818707500580786, "children": { "env_step": { "total": 1235.5351816630027, "count": 63366, "self": 1127.1270648160214, "children": { "SubprocessEnvManager._take_step": { "total": 107.67063594498734, "count": 63366, "self": 4.7632961079904135, "children": { "TorchPolicy.evaluate": { "total": 102.90733983699693, "count": 62568, "self": 35.322871082947245, "children": { "TorchPolicy.sample_actions": { "total": 67.58446875404968, "count": 62568, "self": 67.58446875404968 } } } } }, "workers": { "total": 0.7374809019939903, "count": 63366, "self": 0.0, "children": { "worker_root": { "total": 1958.2127912670126, "count": 63366, "is_parallel": true, "self": 935.5557843619393, "children": { "run_training.setup": { "total": 0.0, "count": 0, "is_parallel": true, "self": 0.0, "children": { "steps_from_proto": { "total": 0.005451580999988437, "count": 1, "is_parallel": true, "self": 0.003598217000103432, "children": { "_process_rank_one_or_two_observation": { "total": 0.0018533639998850049, "count": 8, "is_parallel": true, "self": 0.0018533639998850049 } } }, "UnityEnvironment.step": { "total": 0.05165570500003014, "count": 1, "is_parallel": true, "self": 0.0005141319999211191, "children": { "UnityEnvironment._generate_step_input": { "total": 0.0004980549999800132, "count": 1, "is_parallel": true, "self": 0.0004980549999800132 }, "communicator.exchange": { "total": 0.04887545000008231, "count": 1, "is_parallel": true, "self": 0.04887545000008231 }, "steps_from_proto": { "total": 0.0017680680000466964, "count": 1, "is_parallel": true, "self": 0.0004853980001371383, "children": { "_process_rank_one_or_two_observation": { "total": 0.001282669999909558, "count": 8, "is_parallel": true, "self": 0.001282669999909558 } } } } } } }, "UnityEnvironment.step": { "total": 1022.6570069050732, "count": 63365, "is_parallel": true, "self": 27.460449131097675, "children": { "UnityEnvironment._generate_step_input": { "total": 24.046237155980748, "count": 63365, "is_parallel": true, "self": 24.046237155980748 }, "communicator.exchange": { "total": 876.0644355899757, "count": 63365, "is_parallel": true, "self": 876.0644355899757 }, "steps_from_proto": { "total": 95.0858850280191, 
"count": 63365, "is_parallel": true, "self": 23.720250741921177, "children": { "_process_rank_one_or_two_observation": { "total": 71.36563428609793, "count": 506920, "is_parallel": true, "self": 71.36563428609793 } } } } } } } } } } }, "trainer_advance": { "total": 713.825847153954, "count": 63366, "self": 2.613098152948851, "children": { "process_trajectory": { "total": 168.66315415400334, "count": 63366, "self": 168.459523092003, "children": { "RLTrainer._checkpoint": { "total": 0.20363106200034053, "count": 2, "self": 0.20363106200034053 } } }, "_update_policy": { "total": 542.5495948470018, "count": 435, "self": 214.3418086120431, "children": { "TorchPPOOptimizer.update": { "total": 328.2077862349587, "count": 22878, "self": 328.2077862349587 } } } } } } }, "trainer_threads": { "total": 1.2770001376338769e-06, "count": 1, "self": 1.2770001376338769e-06 }, "TrainerController._save_models": { "total": 0.09224130200027503, "count": 1, "self": 0.0016490870002598967, "children": { "RLTrainer._checkpoint": { "total": 0.09059221500001513, "count": 1, "self": 0.09059221500001513 } } } } } } }