{ "name": "root", "gauges": { "SnowballTarget.Policy.Entropy.mean": { "value": 0.8408380150794983, "min": 0.8408380150794983, "max": 2.8623106479644775, "count": 20 }, "SnowballTarget.Policy.Entropy.sum": { "value": 8037.5703125, "min": 8037.5703125, "max": 29312.923828125, "count": 20 }, "SnowballTarget.Step.mean": { "value": 199984.0, "min": 9952.0, "max": 199984.0, "count": 20 }, "SnowballTarget.Step.sum": { "value": 199984.0, "min": 9952.0, "max": 199984.0, "count": 20 }, "SnowballTarget.Policy.ExtrinsicValueEstimate.mean": { "value": 12.886287689208984, "min": 0.4095931351184845, "max": 12.886287689208984, "count": 20 }, "SnowballTarget.Policy.ExtrinsicValueEstimate.sum": { "value": 2512.826171875, "min": 79.46106719970703, "max": 2591.47509765625, "count": 20 }, "SnowballTarget.Environment.EpisodeLength.mean": { "value": 199.0, "min": 199.0, "max": 199.0, "count": 20 }, "SnowballTarget.Environment.EpisodeLength.sum": { "value": 8756.0, "min": 8756.0, "max": 10945.0, "count": 20 }, "SnowballTarget.Losses.PolicyLoss.mean": { "value": 0.06358247270414794, "min": 0.06005714413956465, "max": 0.07260323546292699, "count": 20 }, "SnowballTarget.Losses.PolicyLoss.sum": { "value": 0.25432989081659174, "min": 0.2402285765582586, "max": 0.3586133790687004, "count": 20 }, "SnowballTarget.Losses.ValueLoss.mean": { "value": 0.1973805659103627, "min": 0.10018396412259808, "max": 0.2929309854174362, "count": 20 }, "SnowballTarget.Losses.ValueLoss.sum": { "value": 0.7895222636414508, "min": 0.40073585649039234, "max": 1.3935093800811207, "count": 20 }, "SnowballTarget.Policy.LearningRate.mean": { "value": 8.082097306000005e-06, "min": 8.082097306000005e-06, "max": 0.000291882002706, "count": 20 }, "SnowballTarget.Policy.LearningRate.sum": { "value": 3.232838922400002e-05, "min": 3.232838922400002e-05, "max": 0.00138516003828, "count": 20 }, "SnowballTarget.Policy.Epsilon.mean": { "value": 0.10269400000000001, "min": 0.10269400000000001, "max": 0.19729400000000002, "count": 20 }, "SnowballTarget.Policy.Epsilon.sum": { "value": 0.41077600000000003, "min": 0.41077600000000003, "max": 0.96172, "count": 20 }, "SnowballTarget.Policy.Beta.mean": { "value": 0.0001444306000000001, "min": 0.0001444306000000001, "max": 0.0048649706, "count": 20 }, "SnowballTarget.Policy.Beta.sum": { "value": 0.0005777224000000004, "min": 0.0005777224000000004, "max": 0.023089828, "count": 20 }, "SnowballTarget.Environment.CumulativeReward.mean": { "value": 25.5, "min": 3.0681818181818183, "max": 25.5, "count": 20 }, "SnowballTarget.Environment.CumulativeReward.sum": { "value": 1122.0, "min": 135.0, "max": 1401.0, "count": 20 }, "SnowballTarget.Policy.ExtrinsicReward.mean": { "value": 25.5, "min": 3.0681818181818183, "max": 25.5, "count": 20 }, "SnowballTarget.Policy.ExtrinsicReward.sum": { "value": 1122.0, "min": 135.0, "max": 1401.0, "count": 20 }, "SnowballTarget.IsTraining.mean": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 20 }, "SnowballTarget.IsTraining.sum": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 20 } }, "metadata": { "timer_format_version": "0.1.0", "start_time_seconds": "1718813778", "python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]", "command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=hishamcse-SnowballTarget-1 --no-graphics", "mlagents_version": "1.1.0.dev0", "mlagents_envs_version": "1.1.0.dev0", "communication_protocol_version": "1.5.0", "pytorch_version": 
"2.3.0+cu121", "numpy_version": "1.23.5", "end_time_seconds": "1718814219" }, "total": 441.06794974000013, "count": 1, "self": 0.4374576510001589, "children": { "run_training.setup": { "total": 0.05546249300005002, "count": 1, "self": 0.05546249300005002 }, "TrainerController.start_learning": { "total": 440.5750295959999, "count": 1, "self": 0.5644469389948199, "children": { "TrainerController._reset_env": { "total": 3.052370828999983, "count": 1, "self": 3.052370828999983 }, "TrainerController.advance": { "total": 436.8698130260051, "count": 18201, "self": 0.2647724589992322, "children": { "env_step": { "total": 436.60504056700586, "count": 18201, "self": 283.3563685320114, "children": { "SubprocessEnvManager._take_step": { "total": 152.97553486200593, "count": 18201, "self": 1.4566514960047243, "children": { "TorchPolicy.evaluate": { "total": 151.5188833660012, "count": 18201, "self": 151.5188833660012 } } }, "workers": { "total": 0.2731371729885268, "count": 18201, "self": 0.0, "children": { "worker_root": { "total": 439.40187636698033, "count": 18201, "is_parallel": true, "self": 222.58647667497655, "children": { "run_training.setup": { "total": 0.0, "count": 0, "is_parallel": true, "self": 0.0, "children": { "steps_from_proto": { "total": 0.005449325000085992, "count": 1, "is_parallel": true, "self": 0.003826786000104221, "children": { "_process_rank_one_or_two_observation": { "total": 0.0016225389999817708, "count": 10, "is_parallel": true, "self": 0.0016225389999817708 } } }, "UnityEnvironment.step": { "total": 0.05703730799996265, "count": 1, "is_parallel": true, "self": 0.0006675840000980315, "children": { "UnityEnvironment._generate_step_input": { "total": 0.0003638429999455184, "count": 1, "is_parallel": true, "self": 0.0003638429999455184 }, "communicator.exchange": { "total": 0.05418945499991423, "count": 1, "is_parallel": true, "self": 0.05418945499991423 }, "steps_from_proto": { "total": 0.001816426000004867, "count": 1, "is_parallel": true, "self": 0.0003321019997883923, "children": { "_process_rank_one_or_two_observation": { "total": 0.0014843240002164748, "count": 10, "is_parallel": true, "self": 0.0014843240002164748 } } } } } } }, "UnityEnvironment.step": { "total": 216.81539969200378, "count": 18200, "is_parallel": true, "self": 10.002867975020763, "children": { "UnityEnvironment._generate_step_input": { "total": 5.1960192459880545, "count": 18200, "is_parallel": true, "self": 5.1960192459880545 }, "communicator.exchange": { "total": 169.28648441399366, "count": 18200, "is_parallel": true, "self": 169.28648441399366 }, "steps_from_proto": { "total": 32.3300280570013, "count": 18200, "is_parallel": true, "self": 6.105900698010487, "children": { "_process_rank_one_or_two_observation": { "total": 26.224127358990813, "count": 182000, "is_parallel": true, "self": 26.224127358990813 } } } } } } } } } } } } }, "trainer_threads": { "total": 0.00011975699999311473, "count": 1, "self": 0.00011975699999311473, "children": { "thread_root": { "total": 0.0, "count": 0, "is_parallel": true, "self": 0.0, "children": { "trainer_advance": { "total": 431.53008547408706, "count": 654407, "is_parallel": true, "self": 13.833332800103335, "children": { "process_trajectory": { "total": 239.37059368498444, "count": 654407, "is_parallel": true, "self": 238.40550158998428, "children": { "RLTrainer._checkpoint": { "total": 0.9650920950001591, "count": 5, "is_parallel": true, "self": 0.9650920950001591 } } }, "_update_policy": { "total": 178.32615898899928, "count": 90, "is_parallel": true, 
"self": 54.00518391600326, "children": { "TorchPPOOptimizer.update": { "total": 124.32097507299602, "count": 4587, "is_parallel": true, "self": 124.32097507299602 } } } } } } } } }, "TrainerController._save_models": { "total": 0.0882790450000357, "count": 1, "self": 0.001023453000243535, "children": { "RLTrainer._checkpoint": { "total": 0.08725559199979216, "count": 1, "self": 0.08725559199979216 } } } } } } }