{ "name": "root", "gauges": { "Worm.Policy.Entropy.mean": { "value": 1.3431179523468018, "min": 1.3431179523468018, "max": 1.4233607053756714, "count": 50 }, "Worm.Policy.Entropy.sum": { "value": 40293.5390625, "min": 40293.5390625, "max": 42700.8203125, "count": 50 }, "Worm.Environment.EpisodeLength.mean": { "value": 999.0, "min": 999.0, "max": 999.0, "count": 50 }, "Worm.Environment.EpisodeLength.sum": { "value": 29970.0, "min": 29970.0, "max": 29970.0, "count": 50 }, "Worm.Step.mean": { "value": 1499000.0, "min": 29000.0, "max": 1499000.0, "count": 50 }, "Worm.Step.sum": { "value": 1499000.0, "min": 29000.0, "max": 1499000.0, "count": 50 }, "Worm.Policy.ExtrinsicValueEstimate.mean": { "value": 28.128171920776367, "min": -0.049589935690164566, "max": 28.128171920776367, "count": 50 }, "Worm.Policy.ExtrinsicValueEstimate.sum": { "value": 843.8451538085938, "min": -1.4381080865859985, "max": 843.8451538085938, "count": 50 }, "Worm.Environment.CumulativeReward.mean": { "value": 181.98054911295574, "min": 0.2684007525444031, "max": 184.13802388509114, "count": 50 }, "Worm.Environment.CumulativeReward.sum": { "value": 5459.416473388672, "min": 8.052022576332092, "max": 5524.140716552734, "count": 50 }, "Worm.Policy.ExtrinsicReward.mean": { "value": 181.98054911295574, "min": 0.2684007525444031, "max": 184.13802388509114, "count": 50 }, "Worm.Policy.ExtrinsicReward.sum": { "value": 5459.416473388672, "min": 8.052022576332092, "max": 5524.140716552734, "count": 50 }, "Worm.IsTraining.mean": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 50 }, "Worm.IsTraining.sum": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 50 }, "Worm.Losses.PolicyLoss.mean": { "value": 0.016066309097888216, "min": 0.012886544306682134, "max": 0.021075325824009877, "count": 49 }, "Worm.Losses.PolicyLoss.sum": { "value": 0.016066309097888216, "min": 0.012886544306682134, "max": 0.021075325824009877, "count": 49 }, "Worm.Losses.ValueLoss.mean": { "value": 2.9034164178939093, "min": 0.0015053260555889989, "max": 2.9034164178939093, "count": 49 }, "Worm.Losses.ValueLoss.sum": { "value": 2.9034164178939093, "min": 0.0015053260555889989, "max": 2.9034164178939093, "count": 49 }, "Worm.Policy.LearningRate.mean": { "value": 6.000098000000006e-06, "min": 6.000098000000006e-06, "max": 0.000294000002, "count": 49 }, "Worm.Policy.LearningRate.sum": { "value": 6.000098000000006e-06, "min": 6.000098000000006e-06, "max": 0.000294000002, "count": 49 }, "Worm.Policy.Epsilon.mean": { "value": 0.10200000000000002, "min": 0.10200000000000002, "max": 0.198, "count": 49 }, "Worm.Policy.Epsilon.sum": { "value": 0.10200000000000002, "min": 0.10200000000000002, "max": 0.198, "count": 49 }, "Worm.Policy.Beta.mean": { "value": 0.0001098000000000001, "min": 0.0001098000000000001, "max": 0.0049002, "count": 49 }, "Worm.Policy.Beta.sum": { "value": 0.0001098000000000001, "min": 0.0001098000000000001, "max": 0.0049002, "count": 49 } }, "metadata": { "timer_format_version": "0.1.0", "start_time_seconds": "1671968412", "python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]", "command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Worm.yaml --env=./trained-envs-executables/linux/Worm/Worm --run-id=Worm Training --no-graphics", "mlagents_version": "0.29.0.dev0", "mlagents_envs_version": "0.29.0.dev0", "communication_protocol_version": "1.5.0", "pytorch_version": "1.8.1+cu102", "numpy_version": "1.21.6", "end_time_seconds": "1671970237" }, "total": 1825.334337814, "count": 1, "self": 0.42431275300009474, "children": { 
"run_training.setup": { "total": 0.10706777899986264, "count": 1, "self": 0.10706777899986264 }, "TrainerController.start_learning": { "total": 1824.802957282, "count": 1, "self": 2.4575373880334155, "children": { "TrainerController._reset_env": { "total": 6.3716269389999525, "count": 1, "self": 6.3716269389999525 }, "TrainerController.advance": { "total": 1815.8591879749672, "count": 151000, "self": 2.5582386229966687, "children": { "env_step": { "total": 1479.5933154009717, "count": 151000, "self": 1290.8111871769404, "children": { "SubprocessEnvManager._take_step": { "total": 187.14185654601965, "count": 151000, "self": 12.090324133031118, "children": { "TorchPolicy.evaluate": { "total": 175.05153241298854, "count": 151000, "self": 44.50808285204357, "children": { "TorchPolicy.sample_actions": { "total": 130.54344956094496, "count": 151000, "self": 130.54344956094496 } } } } }, "workers": { "total": 1.6402716780116862, "count": 151000, "self": 0.0, "children": { "worker_root": { "total": 1819.4336571359856, "count": 151000, "is_parallel": true, "self": 697.61935399699, "children": { "run_training.setup": { "total": 0.0, "count": 0, "is_parallel": true, "self": 0.0, "children": { "steps_from_proto": { "total": 0.0010792420000598213, "count": 1, "is_parallel": true, "self": 0.00028113000007579103, "children": { "_process_rank_one_or_two_observation": { "total": 0.0007981119999840303, "count": 2, "is_parallel": true, "self": 0.0007981119999840303 } } }, "UnityEnvironment.step": { "total": 0.028807444999984, "count": 1, "is_parallel": true, "self": 0.0002292980002494005, "children": { "UnityEnvironment._generate_step_input": { "total": 0.0003817369999978837, "count": 1, "is_parallel": true, "self": 0.0003817369999978837 }, "communicator.exchange": { "total": 0.027590266999823143, "count": 1, "is_parallel": true, "self": 0.027590266999823143 }, "steps_from_proto": { "total": 0.0006061429999135726, "count": 1, "is_parallel": true, "self": 0.00023001199997452204, "children": { "_process_rank_one_or_two_observation": { "total": 0.0003761309999390505, "count": 2, "is_parallel": true, "self": 0.0003761309999390505 } } } } } } }, "UnityEnvironment.step": { "total": 1121.8143031389957, "count": 150999, "is_parallel": true, "self": 24.372718621929607, "children": { "UnityEnvironment._generate_step_input": { "total": 45.66290728593685, "count": 150999, "is_parallel": true, "self": 45.66290728593685 }, "communicator.exchange": { "total": 986.347926977023, "count": 150999, "is_parallel": true, "self": 986.347926977023 }, "steps_from_proto": { "total": 65.43075025410621, "count": 150999, "is_parallel": true, "self": 25.40858833710581, "children": { "_process_rank_one_or_two_observation": { "total": 40.0221619170004, "count": 301998, "is_parallel": true, "self": 40.0221619170004 } } } } } } } } } } }, "trainer_advance": { "total": 333.7076339509988, "count": 151000, "self": 3.081711164936678, "children": { "process_trajectory": { "total": 87.07625051506261, "count": 151000, "self": 86.71208963506228, "children": { "RLTrainer._checkpoint": { "total": 0.36416088000032687, "count": 3, "self": 0.36416088000032687 } } }, "_update_policy": { "total": 243.5496722709995, "count": 50, "self": 205.65002695400335, "children": { "TorchPPOOptimizer.update": { "total": 37.89964531699616, "count": 2100, "self": 37.89964531699616 } } } } } } }, "trainer_threads": { "total": 9.229997885995544e-07, "count": 1, "self": 9.229997885995544e-07 }, "TrainerController._save_models": { "total": 0.11460405699972398, "count": 1, 
"self": 0.0024294829995596956, "children": { "RLTrainer._checkpoint": { "total": 0.11217457400016428, "count": 1, "self": 0.11217457400016428 } } } } } } }