{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.9609049558639526,
"min": 0.9609049558639526,
"max": 2.865432024002075,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 9248.7099609375,
"min": 9248.7099609375,
"max": 29376.41015625,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.887149810791016,
"min": 0.3879944980144501,
"max": 12.887149810791016,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2512.994140625,
"min": 75.27093505859375,
"max": 2595.73046875,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 10945.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.07227496166146297,
"min": 0.06369346271579464,
"max": 0.07628098721524663,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.28909984664585187,
"min": 0.25477385086317855,
"max": 0.38140493607623316,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.1972687646892725,
"min": 0.12519539271221114,
"max": 0.2994969179817274,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.78907505875709,
"min": 0.5007815708488446,
"max": 1.497484589908637,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.477272727272727,
"min": 3.340909090909091,
"max": 25.477272727272727,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1121.0,
"min": 147.0,
"max": 1386.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.477272727272727,
"min": 3.340909090909091,
"max": 25.477272727272727,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1121.0,
"min": 147.0,
"max": 1386.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1736912455",
"python_version": "3.10.12 (main, Nov 6 2024, 20:22:13) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --force --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.5.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1736913045"
},
"total": 589.86867209,
"count": 1,
"self": 0.6006728730002351,
"children": {
"run_training.setup": {
"total": 0.07371051199993417,
"count": 1,
"self": 0.07371051199993417
},
"TrainerController.start_learning": {
"total": 589.1942887049998,
"count": 1,
"self": 0.8605975279957647,
"children": {
"TrainerController._reset_env": {
"total": 7.339312580000069,
"count": 1,
"self": 7.339312580000069
},
"TrainerController.advance": {
"total": 580.8461452860042,
"count": 18218,
"self": 0.45363651799948457,
"children": {
"env_step": {
"total": 580.3925087680047,
"count": 18218,
"self": 445.9029560899902,
"children": {
"SubprocessEnvManager._take_step": {
"total": 134.06898865900837,
"count": 18218,
"self": 2.1767447160158326,
"children": {
"TorchPolicy.evaluate": {
"total": 131.89224394299254,
"count": 18218,
"self": 131.89224394299254
}
}
},
"workers": {
"total": 0.42056401900617857,
"count": 18218,
"self": 0.0,
"children": {
"worker_root": {
"total": 587.1608246520069,
"count": 18218,
"is_parallel": true,
"self": 282.1022569870155,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.003327052000031472,
"count": 1,
"is_parallel": true,
"self": 0.001110227000026498,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002216825000004974,
"count": 10,
"is_parallel": true,
"self": 0.002216825000004974
}
}
},
"UnityEnvironment.step": {
"total": 0.04454938300000322,
"count": 1,
"is_parallel": true,
"self": 0.0008687910000162447,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004554689999167749,
"count": 1,
"is_parallel": true,
"self": 0.0004554689999167749
},
"communicator.exchange": {
"total": 0.04074466500003382,
"count": 1,
"is_parallel": true,
"self": 0.04074466500003382
},
"steps_from_proto": {
"total": 0.0024804580000363785,
"count": 1,
"is_parallel": true,
"self": 0.0004615550001290103,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002018902999907368,
"count": 10,
"is_parallel": true,
"self": 0.002018902999907368
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 305.0585676649914,
"count": 18217,
"is_parallel": true,
"self": 14.359273999973766,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 7.176915193987043,
"count": 18217,
"is_parallel": true,
"self": 7.176915193987043
},
"communicator.exchange": {
"total": 240.28018126600068,
"count": 18217,
"is_parallel": true,
"self": 240.28018126600068
},
"steps_from_proto": {
"total": 43.242197205029925,
"count": 18217,
"is_parallel": true,
"self": 8.447793082972566,
"children": {
"_process_rank_one_or_two_observation": {
"total": 34.79440412205736,
"count": 182170,
"is_parallel": true,
"self": 34.79440412205736
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.0002206879998993827,
"count": 1,
"self": 0.0002206879998993827,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 574.1359256060615,
"count": 756986,
"is_parallel": true,
"self": 17.471193455089406,
"children": {
"process_trajectory": {
"total": 308.57508535097156,
"count": 756986,
"is_parallel": true,
"self": 306.3483644269718,
"children": {
"RLTrainer._checkpoint": {
"total": 2.2267209239997783,
"count": 4,
"is_parallel": true,
"self": 2.2267209239997783
}
}
},
"_update_policy": {
"total": 248.08964680000054,
"count": 90,
"is_parallel": true,
"self": 68.11527600201055,
"children": {
"TorchPPOOptimizer.update": {
"total": 179.97437079799,
"count": 4587,
"is_parallel": true,
"self": 179.97437079799
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.14801262299988593,
"count": 1,
"self": 0.0015093629997409153,
"children": {
"RLTrainer._checkpoint": {
"total": 0.14650326000014502,
"count": 1,
"self": 0.14650326000014502
}
}
}
}
}
}
}