{ "name": "root", "gauges": { "Pyramids.Policy.Entropy.mean": { "value": 0.3720674216747284, "min": 0.36569157242774963, "max": 1.4271082878112793, "count": 33 }, "Pyramids.Policy.Entropy.sum": { "value": 11173.9287109375, "min": 10953.1943359375, "max": 43292.7578125, "count": 33 }, "Pyramids.Step.mean": { "value": 989897.0, "min": 29933.0, "max": 989897.0, "count": 33 }, "Pyramids.Step.sum": { "value": 989897.0, "min": 29933.0, "max": 989897.0, "count": 33 }, "Pyramids.Policy.ExtrinsicValueEstimate.mean": { "value": 0.5588292479515076, "min": -0.11399835348129272, "max": 0.641019880771637, "count": 33 }, "Pyramids.Policy.ExtrinsicValueEstimate.sum": { "value": 155.35452270507812, "min": -27.587600708007812, "max": 180.76760864257812, "count": 33 }, "Pyramids.Policy.RndValueEstimate.mean": { "value": -0.0019860579632222652, "min": -0.026568084955215454, "max": 0.24919602274894714, "count": 33 }, "Pyramids.Policy.RndValueEstimate.sum": { "value": -0.5521240830421448, "min": -7.335100173950195, "max": 60.05624008178711, "count": 33 }, "Pyramids.Losses.PolicyLoss.mean": { "value": 0.06483777577629123, "min": 0.06445962623499023, "max": 0.07609688160871805, "count": 33 }, "Pyramids.Losses.PolicyLoss.sum": { "value": 0.9077288608680772, "min": 0.5326781712610263, "max": 1.0781960766083405, "count": 33 }, "Pyramids.Losses.ValueLoss.mean": { "value": 0.01577160184873113, "min": 0.001168779642284457, "max": 0.01577160184873113, "count": 33 }, "Pyramids.Losses.ValueLoss.sum": { "value": 0.2208024258822358, "min": 0.016362914991982398, "max": 0.2208024258822358, "count": 33 }, "Pyramids.Policy.LearningRate.mean": { "value": 7.288704713321428e-06, "min": 7.288704713321428e-06, "max": 0.00029515063018788575, "count": 33 }, "Pyramids.Policy.LearningRate.sum": { "value": 0.00010204186598649999, "min": 0.00010204186598649999, "max": 0.0033830057723315005, "count": 33 }, "Pyramids.Policy.Epsilon.mean": { "value": 0.10242953571428572, "min": 0.10242953571428572, "max": 0.19838354285714285, "count": 33 }, "Pyramids.Policy.Epsilon.sum": { "value": 1.4340135, "min": 1.3886848, "max": 2.5276685000000003, "count": 33 }, "Pyramids.Policy.Beta.mean": { "value": 0.0002527106178571429, "min": 0.0002527106178571429, "max": 0.00983851593142857, "count": 33 }, "Pyramids.Policy.Beta.sum": { "value": 0.0035379486500000004, "min": 0.0035379486500000004, "max": 0.11279408314999999, "count": 33 }, "Pyramids.Losses.RNDLoss.mean": { "value": 0.00982207152992487, "min": 0.009790536016225815, "max": 0.4574347138404846, "count": 33 }, "Pyramids.Losses.RNDLoss.sum": { "value": 0.13750900328159332, "min": 0.13750900328159332, "max": 3.202043056488037, "count": 33 }, "Pyramids.Environment.EpisodeLength.mean": { "value": 341.17977528089887, "min": 307.0851063829787, "max": 991.7647058823529, "count": 33 }, "Pyramids.Environment.EpisodeLength.sum": { "value": 30365.0, "min": 16860.0, "max": 34203.0, "count": 33 }, "Pyramids.Environment.CumulativeReward.mean": { "value": 1.6390377630790074, "min": -0.8750588771174935, "max": 1.6692906782724137, "count": 33 }, "Pyramids.Environment.CumulativeReward.sum": { "value": 147.51339867711067, "min": -26.200601562857628, "max": 158.20539818704128, "count": 33 }, "Pyramids.Policy.ExtrinsicReward.mean": { "value": 1.6390377630790074, "min": -0.8750588771174935, "max": 1.6692906782724137, "count": 33 }, "Pyramids.Policy.ExtrinsicReward.sum": { "value": 147.51339867711067, "min": -26.200601562857628, "max": 158.20539818704128, "count": 33 }, "Pyramids.Policy.RndReward.mean": { "value": 
0.03468285412786321, "min": 0.03184552979717487, "max": 8.170342303374234, "count": 33 }, "Pyramids.Policy.RndReward.sum": { "value": 3.1214568715076894, "min": 2.9934798009344377, "max": 138.89581915736198, "count": 33 }, "Pyramids.IsTraining.mean": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 33 }, "Pyramids.IsTraining.sum": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 33 } }, "metadata": { "timer_format_version": "0.1.0", "start_time_seconds": "1704165348", "python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]", "command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics", "mlagents_version": "1.1.0.dev0", "mlagents_envs_version": "1.1.0.dev0", "communication_protocol_version": "1.5.0", "pytorch_version": "2.1.2+cu121", "numpy_version": "1.23.5", "end_time_seconds": "1704167804" }, "total": 2456.0244198809996, "count": 1, "self": 0.4767193879997649, "children": { "run_training.setup": { "total": 0.04809449999993376, "count": 1, "self": 0.04809449999993376 }, "TrainerController.start_learning": { "total": 2455.499605993, "count": 1, "self": 1.6442760230283966, "children": { "TrainerController._reset_env": { "total": 2.1799283059999652, "count": 1, "self": 2.1799283059999652 }, "TrainerController.advance": { "total": 2451.5862082099716, "count": 63931, "self": 1.6271036638950136, "children": { "env_step": { "total": 1784.107003899041, "count": 63931, "self": 1640.0207108440673, "children": { "SubprocessEnvManager._take_step": { "total": 143.04698452497837, "count": 63931, "self": 5.263825784918481, "children": { "TorchPolicy.evaluate": { "total": 137.7831587400599, "count": 62553, "self": 137.7831587400599 } } }, "workers": { "total": 1.0393085299954237, "count": 63931, "self": 0.0, "children": { "worker_root": { "total": 2449.580418427044, "count": 63931, "is_parallel": true, "self": 941.9877172431318, "children": { "run_training.setup": { "total": 0.0, "count": 0, "is_parallel": true, "self": 0.0, "children": { "steps_from_proto": { "total": 0.0017395369998212118, "count": 1, "is_parallel": true, "self": 0.0005475259995364468, "children": { "_process_rank_one_or_two_observation": { "total": 0.001192011000284765, "count": 8, "is_parallel": true, "self": 0.001192011000284765 } } }, "UnityEnvironment.step": { "total": 0.04857943399997566, "count": 1, "is_parallel": true, "self": 0.0005760730000474723, "children": { "UnityEnvironment._generate_step_input": { "total": 0.0004808080000202608, "count": 1, "is_parallel": true, "self": 0.0004808080000202608 }, "communicator.exchange": { "total": 0.04582754099988051, "count": 1, "is_parallel": true, "self": 0.04582754099988051 }, "steps_from_proto": { "total": 0.0016950120000274183, "count": 1, "is_parallel": true, "self": 0.00035172100001545914, "children": { "_process_rank_one_or_two_observation": { "total": 0.0013432910000119591, "count": 8, "is_parallel": true, "self": 0.0013432910000119591 } } } } } } }, "UnityEnvironment.step": { "total": 1507.5927011839124, "count": 63930, "is_parallel": true, "self": 37.67545316890573, "children": { "UnityEnvironment._generate_step_input": { "total": 25.309603416004165, "count": 63930, "is_parallel": true, "self": 25.309603416004165 }, "communicator.exchange": { "total": 1339.017174828038, "count": 63930, "is_parallel": true, "self": 1339.017174828038 }, "steps_from_proto": { "total": 105.59046977096455, "count": 63930, "is_parallel": true, "self": 
22.086678532949463, "children": { "_process_rank_one_or_two_observation": { "total": 83.50379123801508, "count": 511440, "is_parallel": true, "self": 83.50379123801508 } } } } } } } } } } }, "trainer_advance": { "total": 665.8521006470355, "count": 63931, "self": 3.120638049033232, "children": { "process_trajectory": { "total": 133.89441279900393, "count": 63931, "self": 133.68657383400387, "children": { "RLTrainer._checkpoint": { "total": 0.2078389650000645, "count": 2, "self": 0.2078389650000645 } } }, "_update_policy": { "total": 528.8370497989984, "count": 456, "self": 312.78919133302406, "children": { "TorchPPOOptimizer.update": { "total": 216.0478584659743, "count": 22839, "self": 216.0478584659743 } } } } } } }, "trainer_threads": { "total": 8.709998837730382e-07, "count": 1, "self": 8.709998837730382e-07 }, "TrainerController._save_models": { "total": 0.08919258300011279, "count": 1, "self": 0.0016065030004028813, "children": { "RLTrainer._checkpoint": { "total": 0.08758607999970991, "count": 1, "self": 0.08758607999970991 } } } } } } }