{ "name": "root", "gauges": { "Pyramids.Policy.Entropy.mean": { "value": 0.37025120854377747, "min": 0.37025120854377747, "max": 1.4247376918792725, "count": 33 }, "Pyramids.Policy.Entropy.sum": { "value": 11137.15625, "min": 11137.15625, "max": 43220.84375, "count": 33 }, "Pyramids.Step.mean": { "value": 989953.0, "min": 29952.0, "max": 989953.0, "count": 33 }, "Pyramids.Step.sum": { "value": 989953.0, "min": 29952.0, "max": 989953.0, "count": 33 }, "Pyramids.Policy.ExtrinsicValueEstimate.mean": { "value": 0.5521695017814636, "min": -0.08244267106056213, "max": 0.5638489127159119, "count": 33 }, "Pyramids.Policy.ExtrinsicValueEstimate.sum": { "value": 152.39878845214844, "min": -20.0335693359375, "max": 159.56924438476562, "count": 33 }, "Pyramids.Policy.RndValueEstimate.mean": { "value": 0.01970941573381424, "min": -0.07270963490009308, "max": 0.27276507019996643, "count": 33 }, "Pyramids.Policy.RndValueEstimate.sum": { "value": 5.439798831939697, "min": -19.55889129638672, "max": 65.73638153076172, "count": 33 }, "Pyramids.Losses.PolicyLoss.mean": { "value": 0.07420514849668725, "min": 0.06523757526354389, "max": 0.07420514849668725, "count": 33 }, "Pyramids.Losses.PolicyLoss.sum": { "value": 1.0388720789536214, "min": 0.5092668565329372, "max": 1.0405694968574999, "count": 33 }, "Pyramids.Losses.ValueLoss.mean": { "value": 0.0148107825935724, "min": 0.0010748059717093266, "max": 0.017208561306571165, "count": 33 }, "Pyramids.Losses.ValueLoss.sum": { "value": 0.2073509563100136, "min": 0.013972477632221246, "max": 0.2424285946808774, "count": 33 }, "Pyramids.Policy.LearningRate.mean": { "value": 7.103017463242857e-06, "min": 7.103017463242857e-06, "max": 0.0002754739216164571, "count": 33 }, "Pyramids.Policy.LearningRate.sum": { "value": 9.94422444854e-05, "min": 9.94422444854e-05, "max": 0.0032586471361974994, "count": 33 }, "Pyramids.Policy.Epsilon.mean": { "value": 0.10253675714285715, "min": 0.10253675714285715, "max": 0.19838354285714285, "count": 33 }, "Pyramids.Policy.Epsilon.sum": { "value": 1.4355146, "min": 1.3886848, "max": 2.5726818999999996, "count": 33 }, "Pyramids.Policy.Beta.mean": { "value": 0.0002634220385714286, "min": 0.0002634220385714286, "max": 0.00983851593142857, "count": 33 }, "Pyramids.Policy.Beta.sum": { "value": 0.00368790854, "min": 0.00368790854, "max": 0.11639386975000003, "count": 33 }, "Pyramids.Losses.RNDLoss.mean": { "value": 0.008715992793440819, "min": 0.008715992793440819, "max": 0.43272796273231506, "count": 33 }, "Pyramids.Losses.RNDLoss.sum": { "value": 0.12202390283346176, "min": 0.12202390283346176, "max": 3.0290956497192383, "count": 33 }, "Pyramids.Environment.EpisodeLength.mean": { "value": 344.33720930232556, "min": 335.7391304347826, "max": 999.0, "count": 33 }, "Pyramids.Environment.EpisodeLength.sum": { "value": 29613.0, "min": 15984.0, "max": 33557.0, "count": 33 }, "Pyramids.Environment.CumulativeReward.mean": { "value": 1.6556627715742864, "min": -1.0000000521540642, "max": 1.6556627715742864, "count": 33 }, "Pyramids.Environment.CumulativeReward.sum": { "value": 142.38699835538864, "min": -28.094001598656178, "max": 149.11039811372757, "count": 33 }, "Pyramids.Policy.ExtrinsicReward.mean": { "value": 1.6556627715742864, "min": -1.0000000521540642, "max": 1.6556627715742864, "count": 33 }, "Pyramids.Policy.ExtrinsicReward.sum": { "value": 142.38699835538864, "min": -28.094001598656178, "max": 149.11039811372757, "count": 33 }, "Pyramids.Policy.RndReward.mean": { "value": 0.031124308354730262, "min": 0.031124308354730262, "max": 
8.45088638458401, "count": 33 }, "Pyramids.Policy.RndReward.sum": { "value": 2.6766905185068026, "min": 2.6766905185068026, "max": 135.21418215334415, "count": 33 }, "Pyramids.IsTraining.mean": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 33 }, "Pyramids.IsTraining.sum": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 33 } }, "metadata": { "timer_format_version": "0.1.0", "start_time_seconds": "1721412680", "python_version": "3.10.12 (main, Mar 22 2024, 16:50:05) [GCC 11.4.0]", "command_line_arguments": "/usr/local/bin/mlagents-learn /content/ml-agents/config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics", "mlagents_version": "1.1.0.dev0", "mlagents_envs_version": "1.1.0.dev0", "communication_protocol_version": "1.5.0", "pytorch_version": "2.3.1+cu121", "numpy_version": "1.23.5", "end_time_seconds": "1721414872" }, "total": 2191.7595605180004, "count": 1, "self": 0.9458149910005886, "children": { "run_training.setup": { "total": 0.056423658999960935, "count": 1, "self": 0.056423658999960935 }, "TrainerController.start_learning": { "total": 2190.757321868, "count": 1, "self": 1.3613470330042219, "children": { "TrainerController._reset_env": { "total": 3.3791117849999637, "count": 1, "self": 3.3791117849999637 }, "TrainerController.advance": { "total": 2185.908952396996, "count": 63914, "self": 1.3842095140425954, "children": { "env_step": { "total": 1543.790822813956, "count": 63914, "self": 1407.16823220886, "children": { "SubprocessEnvManager._take_step": { "total": 135.79019208209775, "count": 63914, "self": 4.699269493117754, "children": { "TorchPolicy.evaluate": { "total": 131.09092258898, "count": 62567, "self": 131.09092258898 } } }, "workers": { "total": 0.8323985229984601, "count": 63914, "self": 0.0, "children": { "worker_root": { "total": 2186.144196754022, "count": 63914, "is_parallel": true, "self": 900.5784019280386, "children": { "run_training.setup": { "total": 0.0, "count": 0, "is_parallel": true, "self": 0.0, "children": { "steps_from_proto": { "total": 0.006589413000028799, "count": 1, "is_parallel": true, "self": 0.004554475000190905, "children": { "_process_rank_one_or_two_observation": { "total": 0.0020349379998378936, "count": 8, "is_parallel": true, "self": 0.0020349379998378936 } } }, "UnityEnvironment.step": { "total": 0.04904677999991236, "count": 1, "is_parallel": true, "self": 0.0006223949998229727, "children": { "UnityEnvironment._generate_step_input": { "total": 0.0005007089999935488, "count": 1, "is_parallel": true, "self": 0.0005007089999935488 }, "communicator.exchange": { "total": 0.0460333130000663, "count": 1, "is_parallel": true, "self": 0.0460333130000663 }, "steps_from_proto": { "total": 0.001890363000029538, "count": 1, "is_parallel": true, "self": 0.0003703249999489344, "children": { "_process_rank_one_or_two_observation": { "total": 0.0015200380000806035, "count": 8, "is_parallel": true, "self": 0.0015200380000806035 } } } } } } }, "UnityEnvironment.step": { "total": 1285.5657948259832, "count": 63913, "is_parallel": true, "self": 33.16353975300353, "children": { "UnityEnvironment._generate_step_input": { "total": 23.756890140975315, "count": 63913, "is_parallel": true, "self": 23.756890140975315 }, "communicator.exchange": { "total": 1130.2263285919998, "count": 63913, "is_parallel": true, "self": 1130.2263285919998 }, "steps_from_proto": { "total": 98.41903634000437, "count": 63913, "is_parallel": true, "self": 20.146207187152754, "children": { 
"_process_rank_one_or_two_observation": { "total": 78.27282915285161, "count": 511304, "is_parallel": true, "self": 78.27282915285161 } } } } } } } } } } }, "trainer_advance": { "total": 640.7339200689972, "count": 63914, "self": 2.6640937119613, "children": { "process_trajectory": { "total": 129.89786223103215, "count": 63914, "self": 129.62650964303202, "children": { "RLTrainer._checkpoint": { "total": 0.27135258800012707, "count": 2, "self": 0.27135258800012707 } } }, "_update_policy": { "total": 508.1719641260038, "count": 453, "self": 292.85083411696166, "children": { "TorchPPOOptimizer.update": { "total": 215.32113000904212, "count": 22797, "self": 215.32113000904212 } } } } } } }, "trainer_threads": { "total": 1.6129997675307095e-06, "count": 1, "self": 1.6129997675307095e-06 }, "TrainerController._save_models": { "total": 0.10790903999986767, "count": 1, "self": 0.0014100619996497699, "children": { "RLTrainer._checkpoint": { "total": 0.1064989780002179, "count": 1, "self": 0.1064989780002179 } } } } } } }