ppo-Worm / run_logs / timers.json
{
"name": "root",
"gauges": {
"Worm.Policy.Entropy.mean": {
"value": 1.3861826658248901,
"min": 1.3861826658248901,
"max": 1.418938398361206,
"count": 33
},
"Worm.Policy.Entropy.sum": {
"value": 41585.48046875,
"min": 41585.48046875,
"max": 42568.15234375,
"count": 33
},
"Worm.Environment.EpisodeLength.mean": {
"value": 999.0,
"min": 999.0,
"max": 999.0,
"count": 33
},
"Worm.Environment.EpisodeLength.sum": {
"value": 29970.0,
"min": 29970.0,
"max": 29970.0,
"count": 33
},
"Worm.Step.mean": {
"value": 989000.0,
"min": 29000.0,
"max": 989000.0,
"count": 33
},
"Worm.Step.sum": {
"value": 989000.0,
"min": 29000.0,
"max": 989000.0,
"count": 33
},
"Worm.Policy.ExtrinsicValueEstimate.mean": {
"value": 6.004525661468506,
"min": -0.051633987575769424,
"max": 6.004525661468506,
"count": 33
},
"Worm.Policy.ExtrinsicValueEstimate.sum": {
"value": 180.13577270507812,
"min": -1.4973856210708618,
"max": 180.13577270507812,
"count": 33
},
"Worm.Environment.CumulativeReward.mean": {
"value": 61.040428225199385,
"min": 0.1919361693656136,
"max": 61.930335680643715,
"count": 33
},
"Worm.Environment.CumulativeReward.sum": {
"value": 1831.2128467559814,
"min": 5.566148911602795,
"max": 1857.9100704193115,
"count": 33
},
"Worm.Policy.ExtrinsicReward.mean": {
"value": 61.040428225199385,
"min": 0.1919361693656136,
"max": 61.930335680643715,
"count": 33
},
"Worm.Policy.ExtrinsicReward.sum": {
"value": 1831.2128467559814,
"min": 5.566148911602795,
"max": 1857.9100704193115,
"count": 33
},
"Worm.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Worm.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Worm.Losses.PolicyLoss.mean": {
"value": 0.014891214704784076,
"min": 0.01339010396900232,
"max": 0.021539426104850247,
"count": 32
},
"Worm.Losses.PolicyLoss.sum": {
"value": 0.014891214704784076,
"min": 0.01339010396900232,
"max": 0.021539426104850247,
"count": 32
},
"Worm.Losses.ValueLoss.mean": {
"value": 0.6217004770324344,
"min": 0.0009417563407970149,
"max": 0.6217004770324344,
"count": 32
},
"Worm.Losses.ValueLoss.sum": {
"value": 0.6217004770324344,
"min": 0.0009417563407970149,
"max": 0.6217004770324344,
"count": 32
},
"Worm.Policy.LearningRate.mean": {
"value": 1.2000096000000011e-05,
"min": 1.2000096000000011e-05,
"max": 0.0002910000029999999,
"count": 32
},
"Worm.Policy.LearningRate.sum": {
"value": 1.2000096000000011e-05,
"min": 1.2000096000000011e-05,
"max": 0.0002910000029999999,
"count": 32
},
"Worm.Policy.Epsilon.mean": {
"value": 0.10400000000000001,
"min": 0.10400000000000001,
"max": 0.19699999999999998,
"count": 32
},
"Worm.Policy.Epsilon.sum": {
"value": 0.10400000000000001,
"min": 0.10400000000000001,
"max": 0.19699999999999998,
"count": 32
},
"Worm.Policy.Beta.mean": {
"value": 0.00020960000000000022,
"min": 0.00020960000000000022,
"max": 0.0048503,
"count": 32
},
"Worm.Policy.Beta.sum": {
"value": 0.00020960000000000022,
"min": 0.00020960000000000022,
"max": 0.0048503,
"count": 32
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1739582328",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Worm.yaml --env=./training-envs-executables/linux/Worm/Worm --run-id=Worm Training --no-graphics --force",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.6.0+cu124",
"numpy_version": "1.23.5",
"end_time_seconds": "1739583719"
},
"total": 1390.625428935,
"count": 1,
"self": 0.5769163620000199,
"children": {
"run_training.setup": {
"total": 0.021503205000044545,
"count": 1,
"self": 0.021503205000044545
},
"TrainerController.start_learning": {
"total": 1390.0270093679999,
"count": 1,
"self": 2.0359425620704314,
"children": {
"TrainerController._reset_env": {
"total": 3.182218063000164,
"count": 1,
"self": 3.182218063000164
},
"TrainerController.advance": {
"total": 1384.6823440109295,
"count": 101000,
"self": 2.120266040037677,
"children": {
"env_step": {
"total": 1166.401741433958,
"count": 101000,
"self": 967.9786514139496,
"children": {
"SubprocessEnvManager._take_step": {
"total": 197.17970346301627,
"count": 101000,
"self": 7.428139034011792,
"children": {
"TorchPolicy.evaluate": {
"total": 189.75156442900447,
"count": 101000,
"self": 189.75156442900447
}
}
},
"workers": {
"total": 1.2433865569921636,
"count": 101000,
"self": 0.0,
"children": {
"worker_root": {
"total": 1386.0217035990797,
"count": 101000,
"is_parallel": true,
"self": 551.4499236080947,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0010331799999221403,
"count": 1,
"is_parallel": true,
"self": 0.00025955199998861644,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0007736279999335238,
"count": 2,
"is_parallel": true,
"self": 0.0007736279999335238
}
}
},
"UnityEnvironment.step": {
"total": 0.031536455999912505,
"count": 1,
"is_parallel": true,
"self": 0.0002573480003320583,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004152059998432378,
"count": 1,
"is_parallel": true,
"self": 0.0004152059998432378
},
"communicator.exchange": {
"total": 0.030326579999837122,
"count": 1,
"is_parallel": true,
"self": 0.030326579999837122
},
"steps_from_proto": {
"total": 0.0005373219999000867,
"count": 1,
"is_parallel": true,
"self": 0.00016747100016800687,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0003698509997320798,
"count": 2,
"is_parallel": true,
"self": 0.0003698509997320798
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 834.5717799909851,
"count": 100999,
"is_parallel": true,
"self": 18.06819325792003,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 30.929885190010054,
"count": 100999,
"is_parallel": true,
"self": 30.929885190010054
},
"communicator.exchange": {
"total": 742.2309833430102,
"count": 100999,
"is_parallel": true,
"self": 742.2309833430102
},
"steps_from_proto": {
"total": 43.34271820004483,
"count": 100999,
"is_parallel": true,
"self": 13.99210863813255,
"children": {
"_process_rank_one_or_two_observation": {
"total": 29.35060956191228,
"count": 201998,
"is_parallel": true,
"self": 29.35060956191228
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 216.16033653693376,
"count": 101000,
"self": 2.382445713875086,
"children": {
"process_trajectory": {
"total": 56.379384717058656,
"count": 101000,
"self": 56.02510363605893,
"children": {
"RLTrainer._checkpoint": {
"total": 0.3542810809997263,
"count": 2,
"self": 0.3542810809997263
}
}
},
"_update_policy": {
"total": 157.398506106,
"count": 33,
"self": 126.13356308099537,
"children": {
"TorchPPOOptimizer.update": {
"total": 31.26494302500464,
"count": 1386,
"self": 31.26494302500464
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.509999472356867e-07,
"count": 1,
"self": 9.509999472356867e-07
},
"TrainerController._save_models": {
"total": 0.12650378099988302,
"count": 1,
"self": 0.0017894199995680538,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12471436100031497,
"count": 1,
"self": 0.12471436100031497
}
}
}
}
}
}
}
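
For reference, a minimal Python sketch (not part of the original run logs) that loads a timers file like the one above and prints the gauge summaries plus the top-level timer totals. The path `run_logs/timers.json` is an assumption taken from the page title; adjust it to wherever the file lives locally.

```python
# Minimal sketch: summarize an ML-Agents timers.json like the one above.
# The path below is an assumption based on the page title; adjust as needed.
import json

with open("run_logs/timers.json") as f:
    timers = json.load(f)

# Gauges: running value/min/max/count per metric, as recorded during training.
for name, gauge in timers["gauges"].items():
    print(f"{name}: value={gauge['value']:.4f} "
          f"(min={gauge['min']:.4f}, max={gauge['max']:.4f}, count={gauge['count']})")

# Timer tree: wall-clock seconds spent in each top-level block of the run.
print(f"\ntotal: {timers['total']:.1f} s over {timers['count']} run(s)")
for child, node in timers["children"].items():
    print(f"  {child}: {node['total']:.1f} s ({node['count']} calls)")
```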