{ "name": "root", "gauges": { "Pyramids.Policy.Entropy.mean": { "value": 0.27660733461380005, "min": 0.27660733461380005, "max": 0.6970478296279907, "count": 21 }, "Pyramids.Policy.Entropy.sum": { "value": 8227.408203125, "min": 8227.408203125, "max": 21101.03125, "count": 21 }, "Pyramids.Step.mean": { "value": 1619896.0, "min": 1019972.0, "max": 1619896.0, "count": 21 }, "Pyramids.Step.sum": { "value": 1619896.0, "min": 1019972.0, "max": 1619896.0, "count": 21 }, "Pyramids.Policy.ExtrinsicValueEstimate.mean": { "value": 0.592436671257019, "min": 0.17887116968631744, "max": 0.5980270504951477, "count": 21 }, "Pyramids.Policy.ExtrinsicValueEstimate.sum": { "value": 169.4368896484375, "min": 38.88249206542969, "max": 169.4368896484375, "count": 21 }, "Pyramids.Policy.RndValueEstimate.mean": { "value": 0.010919425636529922, "min": -0.01185104250907898, "max": 0.07567622512578964, "count": 21 }, "Pyramids.Policy.RndValueEstimate.sum": { "value": 3.122955799102783, "min": -3.176079273223877, "max": 20.810962677001953, "count": 21 }, "Pyramids.Environment.EpisodeLength.mean": { "value": 304.00943396226415, "min": 304.00943396226415, "max": 704.9761904761905, "count": 21 }, "Pyramids.Environment.EpisodeLength.sum": { "value": 32225.0, "min": 15375.0, "max": 33061.0, "count": 21 }, "Pyramids.Environment.CumulativeReward.mean": { "value": 1.6582433830876395, "min": 0.46615810692310333, "max": 1.6687587479647903, "count": 21 }, "Pyramids.Environment.CumulativeReward.sum": { "value": 175.7737986072898, "min": 20.044798597693443, "max": 175.7737986072898, "count": 21 }, "Pyramids.Policy.ExtrinsicReward.mean": { "value": 1.6582433830876395, "min": 0.46615810692310333, "max": 1.6687587479647903, "count": 21 }, "Pyramids.Policy.ExtrinsicReward.sum": { "value": 175.7737986072898, "min": 20.044798597693443, "max": 175.7737986072898, "count": 21 }, "Pyramids.Policy.RndReward.mean": { "value": 0.02843567615113812, "min": 0.02843567615113812, "max": 0.09740542004366817, "count": 21 }, "Pyramids.Policy.RndReward.sum": { "value": 3.0141816720206407, "min": 2.240261279977858, "max": 4.188433061877731, "count": 21 }, "Pyramids.Losses.PolicyLoss.mean": { "value": 0.06440481870612573, "min": 0.06440481870612573, "max": 0.07214185008771558, "count": 21 }, "Pyramids.Losses.PolicyLoss.sum": { "value": 0.9016674618857602, "min": 0.6178666375965501, "max": 1.0821277513157337, "count": 21 }, "Pyramids.Losses.ValueLoss.mean": { "value": 0.0158456465358525, "min": 0.007521672538129859, "max": 0.016608913912759385, "count": 21 }, "Pyramids.Losses.ValueLoss.sum": { "value": 0.221839051501935, "min": 0.0910664204234005, "max": 0.24370129945015712, "count": 21 }, "Pyramids.Policy.LearningRate.mean": { "value": 0.00013951353920979047, "min": 0.00013951353920979047, "max": 0.00019892394480314075, "count": 21 }, "Pyramids.Policy.LearningRate.sum": { "value": 0.0019531895489370665, "min": 0.0017903155032282668, "max": 0.0027675777774742666, "count": 21 }, "Pyramids.Policy.Epsilon.mean": { "value": 0.14650449523809522, "min": 0.14650449523809522, "max": 0.16630797037037037, "count": 21 }, "Pyramids.Policy.Epsilon.sum": { "value": 2.0510629333333332, "min": 1.4967717333333335, "max": 2.422525733333334, "count": 21 }, "Pyramids.Policy.Beta.mean": { "value": 0.004655799074285713, "min": 0.004655799074285713, "max": 0.006634166239999999, "count": 21 }, "Pyramids.Policy.Beta.sum": { "value": 0.06518118703999999, "min": 0.05970749616, "max": 0.09231032076000001, "count": 21 }, "Pyramids.Losses.RNDLoss.mean": { "value": 
0.008971639908850193, "min": 0.008432705886662006, "max": 0.01435074396431446, "count": 21 }, "Pyramids.Losses.RNDLoss.sum": { "value": 0.12560296058654785, "min": 0.11805787682533264, "max": 0.1869449019432068, "count": 21 }, "Pyramids.IsTraining.mean": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 21 }, "Pyramids.IsTraining.sum": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 21 } }, "metadata": { "timer_format_version": "0.1.0", "start_time_seconds": "1710476387", "python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]", "command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics --resume", "mlagents_version": "1.1.0.dev0", "mlagents_envs_version": "1.1.0.dev0", "communication_protocol_version": "1.5.0", "pytorch_version": "2.2.1+cu121", "numpy_version": "1.23.5", "end_time_seconds": "1710477946" }, "total": 1558.879617263, "count": 1, "self": 0.6296550729998671, "children": { "run_training.setup": { "total": 0.08422484399989116, "count": 1, "self": 0.08422484399989116 }, "TrainerController.start_learning": { "total": 1558.1657373460002, "count": 1, "self": 1.012262123995697, "children": { "TrainerController._reset_env": { "total": 2.2892777010001737, "count": 1, "self": 2.2892777010001737 }, "TrainerController.advance": { "total": 1554.8604768780046, "count": 40547, "self": 1.0200760621187328, "children": { "env_step": { "total": 1142.0261885059954, "count": 40547, "self": 1051.803102436962, "children": { "SubprocessEnvManager._take_step": { "total": 89.596789138031, "count": 40547, "self": 3.2497139430465722, "children": { "TorchPolicy.evaluate": { "total": 86.34707519498443, "count": 39320, "self": 86.34707519498443 } } }, "workers": { "total": 0.6262969310023436, "count": 40546, "self": 0.0, "children": { "worker_root": { "total": 1554.5629688260428, "count": 40546, "is_parallel": true, "self": 587.4225106940371, "children": { "run_training.setup": { "total": 0.0, "count": 0, "is_parallel": true, "self": 0.0, "children": { "steps_from_proto": { "total": 0.002216930000031425, "count": 1, "is_parallel": true, "self": 0.0006426989998544741, "children": { "_process_rank_one_or_two_observation": { "total": 0.001574231000176951, "count": 8, "is_parallel": true, "self": 0.001574231000176951 } } }, "UnityEnvironment.step": { "total": 0.05006574899971383, "count": 1, "is_parallel": true, "self": 0.0006695329998365196, "children": { "UnityEnvironment._generate_step_input": { "total": 0.0004964290001225891, "count": 1, "is_parallel": true, "self": 0.0004964290001225891 }, "communicator.exchange": { "total": 0.04715151199980028, "count": 1, "is_parallel": true, "self": 0.04715151199980028 }, "steps_from_proto": { "total": 0.0017482749999544467, "count": 1, "is_parallel": true, "self": 0.0003777639994950732, "children": { "_process_rank_one_or_two_observation": { "total": 0.0013705110004593735, "count": 8, "is_parallel": true, "self": 0.0013705110004593735 } } } } } } }, "UnityEnvironment.step": { "total": 967.1404581320057, "count": 40545, "is_parallel": true, "self": 23.175993409091006, "children": { "UnityEnvironment._generate_step_input": { "total": 16.720818008942842, "count": 40545, "is_parallel": true, "self": 16.720818008942842 }, "communicator.exchange": { "total": 856.9368456759912, "count": 40545, "is_parallel": true, "self": 856.9368456759912 }, "steps_from_proto": { "total": 70.30680103798068, "count": 40545, "is_parallel": true, "self": 
14.780989722108643, "children": { "_process_rank_one_or_two_observation": { "total": 55.52581131587203, "count": 324360, "is_parallel": true, "self": 55.52581131587203 } } } } } } } } } } }, "trainer_advance": { "total": 411.81421230989054, "count": 40546, "self": 2.0163928877691433, "children": { "process_trajectory": { "total": 87.82973507513134, "count": 40546, "self": 87.69651986713097, "children": { "RLTrainer._checkpoint": { "total": 0.13321520800036524, "count": 1, "self": 0.13321520800036524 } } }, "_update_policy": { "total": 321.96808434699005, "count": 296, "self": 189.38236025700735, "children": { "TorchPPOOptimizer.update": { "total": 132.5857240899827, "count": 14277, "self": 132.5857240899827 } } } } } } }, "trainer_threads": { "total": 1.2219998097862117e-06, "count": 1, "self": 1.2219998097862117e-06 }, "TrainerController._save_models": { "total": 0.0037194209999142913, "count": 1, "self": 2.6368999897385947e-05, "children": { "RLTrainer._checkpoint": { "total": 0.0036930520000169054, "count": 1, "self": 0.0036930520000169054 } } } } } } }