{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.211785227060318,
"min": 0.1946796178817749,
"max": 1.5224860906600952,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 6326.4482421875,
"min": 5834.15869140625,
"max": 46186.13671875,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989901.0,
"min": 29952.0,
"max": 989901.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989901.0,
"min": 29952.0,
"max": 989901.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.7602939009666443,
"min": -0.07265773415565491,
"max": 0.7602939009666443,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 222.0058135986328,
"min": -17.510513305664062,
"max": 222.0058135986328,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.040066372603178024,
"min": 0.040066372603178024,
"max": 1.1749746799468994,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 11.699380874633789,
"min": 11.699380874633789,
"max": 283.16888427734375,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.03294433915011939,
"min": 0.028475876946079857,
"max": 0.04587337804189178,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.23061037405083576,
"min": 0.18349351216756712,
"max": 0.27638394188640325,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.015712156816984395,
"min": 0.0038830644961091736,
"max": 0.02327030710993579,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.10998509771889077,
"min": 0.027181451472764215,
"max": 0.12701877288054675,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.912040219828571e-06,
"min": 7.912040219828571e-06,
"max": 0.00029544960151680006,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 5.53842815388e-05,
"min": 5.53842815388e-05,
"max": 0.0019394424535191998,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10263731428571429,
"min": 0.10263731428571429,
"max": 0.19848320000000003,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 0.7184612,
"min": 0.7184612,
"max": 1.3960433,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00027346769714285717,
"min": 0.00027346769714285717,
"max": 0.009848471680000002,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0019142738800000004,
"min": 0.0019142738800000004,
"max": 0.06465343192,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.03420932963490486,
"min": 0.03420932963490486,
"max": 1.5784579515457153,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.23946531116962433,
"min": 0.23946531116962433,
"max": 6.313831806182861,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 249.64705882352942,
"min": 249.64705882352942,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29708.0,
"min": 15984.0,
"max": 33271.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.7503529301210612,
"min": -1.0000000521540642,
"max": 1.7503529301210612,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 208.29199868440628,
"min": -29.245001673698425,
"max": 209.1213981807232,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.7503529301210612,
"min": -1.0000000521540642,
"max": 1.7503529301210612,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 208.29199868440628,
"min": -29.245001673698425,
"max": 209.1213981807232,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.08683016150529466,
"min": 0.08683016150529466,
"max": 19.198913529515266,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 10.332789219130063,
"min": 10.332789219130063,
"max": 510.13557055592537,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1741228206",
"python_version": "3.10.12 (main, Nov 6 2024, 20:22:13) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics --force",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.5.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1741232345"
},
"total": 4138.321809829,
"count": 1,
"self": 0.47562991899940243,
"children": {
"run_training.setup": {
"total": 0.10751997299996674,
"count": 1,
"self": 0.10751997299996674
},
"TrainerController.start_learning": {
"total": 4137.738659937,
"count": 1,
"self": 2.1328908790274,
"children": {
"TrainerController._reset_env": {
"total": 6.418224683999995,
"count": 1,
"self": 6.418224683999995
},
"TrainerController.advance": {
"total": 4128.719837755973,
"count": 64835,
"self": 2.1788465019526484,
"children": {
"env_step": {
"total": 2254.438757555011,
"count": 64835,
"self": 1923.9197663909936,
"children": {
"SubprocessEnvManager._take_step": {
"total": 329.24431494997634,
"count": 64835,
"self": 7.465833500981489,
"children": {
"TorchPolicy.evaluate": {
"total": 321.77848144899485,
"count": 62559,
"self": 321.77848144899485
}
}
},
"workers": {
"total": 1.2746762140409373,
"count": 64835,
"self": 0.0,
"children": {
"worker_root": {
"total": 4133.126782388971,
"count": 64835,
"is_parallel": true,
"self": 2371.8190018989253,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.00279798200000414,
"count": 1,
"is_parallel": true,
"self": 0.0009069270000736651,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0018910549999304749,
"count": 8,
"is_parallel": true,
"self": 0.0018910549999304749
}
}
},
"UnityEnvironment.step": {
"total": 0.049881557999981396,
"count": 1,
"is_parallel": true,
"self": 0.0007859379999786142,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0006339110000226356,
"count": 1,
"is_parallel": true,
"self": 0.0006339110000226356
},
"communicator.exchange": {
"total": 0.04627840199998445,
"count": 1,
"is_parallel": true,
"self": 0.04627840199998445
},
"steps_from_proto": {
"total": 0.0021833069999956933,
"count": 1,
"is_parallel": true,
"self": 0.00047210000002451125,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001711206999971182,
"count": 8,
"is_parallel": true,
"self": 0.001711206999971182
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1761.307780490046,
"count": 64834,
"is_parallel": true,
"self": 41.71716790626556,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 28.109792606913743,
"count": 64834,
"is_parallel": true,
"self": 28.109792606913743
},
"communicator.exchange": {
"total": 1588.2742161969807,
"count": 64834,
"is_parallel": true,
"self": 1588.2742161969807
},
"steps_from_proto": {
"total": 103.20660377988622,
"count": 64834,
"is_parallel": true,
"self": 22.666827304719106,
"children": {
"_process_rank_one_or_two_observation": {
"total": 80.53977647516712,
"count": 518672,
"is_parallel": true,
"self": 80.53977647516712
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1872.1022336990093,
"count": 64835,
"self": 4.3382759709991205,
"children": {
"process_trajectory": {
"total": 266.6639659390084,
"count": 64835,
"self": 265.79424973100816,
"children": {
"RLTrainer._checkpoint": {
"total": 0.8697162080002272,
"count": 2,
"self": 0.8697162080002272
}
}
},
"_update_policy": {
"total": 1601.0999917890017,
"count": 235,
"self": 271.64368517001026,
"children": {
"TorchPPOOptimizer.update": {
"total": 1329.4563066189914,
"count": 5754,
"self": 1329.4563066189914
}
}
}
}
}
}
},
"trainer_threads": {
"total": 2.124000275216531e-06,
"count": 1,
"self": 2.124000275216531e-06
},
"TrainerController._save_models": {
"total": 0.46770449399991776,
"count": 1,
"self": 0.016055266999501328,
"children": {
"RLTrainer._checkpoint": {
"total": 0.45164922700041643,
"count": 1,
"self": 0.45164922700041643
}
}
}
}
}
}
}