{ "name": "root", "gauges": { "SnowballTarget.Policy.Entropy.mean": { "value": 1.0529404878616333, "min": 1.0529404878616333, "max": 2.8591623306274414, "count": 20 }, "SnowballTarget.Policy.Entropy.sum": { "value": 10088.22265625, "min": 10088.22265625, "max": 29343.583984375, "count": 20 }, "SnowballTarget.Step.mean": { "value": 199984.0, "min": 9952.0, "max": 199984.0, "count": 20 }, "SnowballTarget.Step.sum": { "value": 199984.0, "min": 9952.0, "max": 199984.0, "count": 20 }, "SnowballTarget.Policy.ExtrinsicValueEstimate.mean": { "value": 12.199426651000977, "min": 0.47029176354408264, "max": 12.199426651000977, "count": 20 }, "SnowballTarget.Policy.ExtrinsicValueEstimate.sum": { "value": 2378.88818359375, "min": 91.23660278320312, "max": 2455.91748046875, "count": 20 }, "SnowballTarget.Environment.EpisodeLength.mean": { "value": 199.0, "min": 199.0, "max": 199.0, "count": 20 }, "SnowballTarget.Environment.EpisodeLength.sum": { "value": 8756.0, "min": 8756.0, "max": 10945.0, "count": 20 }, "SnowballTarget.Losses.PolicyLoss.mean": { "value": 0.07009796334736451, "min": 0.059441049417763875, "max": 0.07652888418010928, "count": 20 }, "SnowballTarget.Losses.PolicyLoss.sum": { "value": 0.28039185338945805, "min": 0.2377641976710555, "max": 0.3826444209005464, "count": 20 }, "SnowballTarget.Losses.ValueLoss.mean": { "value": 0.21495203622708134, "min": 0.12298309716039939, "max": 0.27829058585213684, "count": 20 }, "SnowballTarget.Losses.ValueLoss.sum": { "value": 0.8598081449083254, "min": 0.49193238864159755, "max": 1.3413953357467465, "count": 20 }, "SnowballTarget.Policy.LearningRate.mean": { "value": 8.082097306000005e-06, "min": 8.082097306000005e-06, "max": 0.000291882002706, "count": 20 }, "SnowballTarget.Policy.LearningRate.sum": { "value": 3.232838922400002e-05, "min": 3.232838922400002e-05, "max": 0.00138516003828, "count": 20 }, "SnowballTarget.Policy.Epsilon.mean": { "value": 0.10269400000000001, "min": 0.10269400000000001, "max": 0.19729400000000002, "count": 20 }, "SnowballTarget.Policy.Epsilon.sum": { "value": 0.41077600000000003, "min": 0.41077600000000003, "max": 0.96172, "count": 20 }, "SnowballTarget.Policy.Beta.mean": { "value": 0.0001444306000000001, "min": 0.0001444306000000001, "max": 0.0048649706, "count": 20 }, "SnowballTarget.Policy.Beta.sum": { "value": 0.0005777224000000004, "min": 0.0005777224000000004, "max": 0.023089828, "count": 20 }, "SnowballTarget.Environment.CumulativeReward.mean": { "value": 23.90909090909091, "min": 3.0681818181818183, "max": 24.2, "count": 20 }, "SnowballTarget.Environment.CumulativeReward.sum": { "value": 1052.0, "min": 135.0, "max": 1331.0, "count": 20 }, "SnowballTarget.Policy.ExtrinsicReward.mean": { "value": 23.90909090909091, "min": 3.0681818181818183, "max": 24.2, "count": 20 }, "SnowballTarget.Policy.ExtrinsicReward.sum": { "value": 1052.0, "min": 135.0, "max": 1331.0, "count": 20 }, "SnowballTarget.IsTraining.mean": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 20 }, "SnowballTarget.IsTraining.sum": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 20 } }, "metadata": { "timer_format_version": "0.1.0", "start_time_seconds": "1689621444", "python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]", "command_line_arguments": "/home/alexis/anaconda3/envs/RL-atari/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics", "mlagents_version": "0.31.0.dev0", "mlagents_envs_version": "0.31.0.dev0", 
"communication_protocol_version": "1.5.0", "pytorch_version": "1.11.0+cu102", "numpy_version": "1.21.2", "end_time_seconds": "1689622138" }, "total": 693.7883085770009, "count": 1, "self": 0.42666641599862487, "children": { "run_training.setup": { "total": 0.03411486200275249, "count": 1, "self": 0.03411486200275249 }, "TrainerController.start_learning": { "total": 693.3275272989995, "count": 1, "self": 0.6593087788023695, "children": { "TrainerController._reset_env": { "total": 3.9473821940009657, "count": 1, "self": 3.9473821940009657 }, "TrainerController.advance": { "total": 688.4908155831981, "count": 18205, "self": 0.3197593262884766, "children": { "env_step": { "total": 688.1710562569097, "count": 18205, "self": 510.05224751183414, "children": { "SubprocessEnvManager._take_step": { "total": 177.75420906916042, "count": 18205, "self": 2.129204431159451, "children": { "TorchPolicy.evaluate": { "total": 175.62500463800097, "count": 18205, "self": 175.62500463800097 } } }, "workers": { "total": 0.3645996759150876, "count": 18205, "self": 0.0, "children": { "worker_root": { "total": 692.1858217528425, "count": 18205, "is_parallel": true, "self": 356.728927467484, "children": { "run_training.setup": { "total": 0.0, "count": 0, "is_parallel": true, "self": 0.0, "children": { "steps_from_proto": { "total": 0.002210120001109317, "count": 1, "is_parallel": true, "self": 0.000674126004014397, "children": { "_process_rank_one_or_two_observation": { "total": 0.00153599399709492, "count": 10, "is_parallel": true, "self": 0.00153599399709492 } } }, "UnityEnvironment.step": { "total": 0.04015323200292187, "count": 1, "is_parallel": true, "self": 0.0006454260037571657, "children": { "UnityEnvironment._generate_step_input": { "total": 0.0008926250011427328, "count": 1, "is_parallel": true, "self": 0.0008926250011427328 }, "communicator.exchange": { "total": 0.03621404099976644, "count": 1, "is_parallel": true, "self": 0.03621404099976644 }, "steps_from_proto": { "total": 0.0024011399982555304, "count": 1, "is_parallel": true, "self": 0.0005688380006176885, "children": { "_process_rank_one_or_two_observation": { "total": 0.0018323019976378419, "count": 10, "is_parallel": true, "self": 0.0018323019976378419 } } } } } } }, "UnityEnvironment.step": { "total": 335.45689428535843, "count": 18204, "is_parallel": true, "self": 12.068210647401429, "children": { "UnityEnvironment._generate_step_input": { "total": 9.57947735271955, "count": 18204, "is_parallel": true, "self": 9.57947735271955 }, "communicator.exchange": { "total": 271.79505439597415, "count": 18204, "is_parallel": true, "self": 271.79505439597415 }, "steps_from_proto": { "total": 42.0141518892633, "count": 18204, "is_parallel": true, "self": 8.05652049661876, "children": { "_process_rank_one_or_two_observation": { "total": 33.957631392644544, "count": 182040, "is_parallel": true, "self": 33.957631392644544 } } } } } } } } } } } } }, "trainer_threads": { "total": 0.00031942399800755084, "count": 1, "self": 0.00031942399800755084, "children": { "thread_root": { "total": 0.0, "count": 0, "is_parallel": true, "self": 0.0, "children": { "trainer_advance": { "total": 685.3959481476959, "count": 267283, "is_parallel": true, "self": 8.365898773907247, "children": { "process_trajectory": { "total": 366.5182390647824, "count": 267283, "is_parallel": true, "self": 365.1084889077829, "children": { "RLTrainer._checkpoint": { "total": 1.4097501569995075, "count": 4, "is_parallel": true, "self": 1.4097501569995075 } } }, "_update_policy": { "total": 
310.51181030900625, "count": 90, "is_parallel": true, "self": 94.08348173892227, "children": { "TorchPPOOptimizer.update": { "total": 216.42832857008398, "count": 4587, "is_parallel": true, "self": 216.42832857008398 } } } } } } } } }, "TrainerController._save_models": { "total": 0.22970131900001434, "count": 1, "self": 0.0010904570008278824, "children": { "RLTrainer._checkpoint": { "total": 0.22861086199918645, "count": 1, "self": 0.22861086199918645 } } } } } } }