{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.28666916489601135,
"min": 0.2645026743412018,
"max": 1.4314700365066528,
"count": 45
},
"Pyramids.Policy.Entropy.sum": {
"value": 8650.5283203125,
"min": 7968.9365234375,
"max": 43425.07421875,
"count": 45
},
"Pyramids.Step.mean": {
"value": 1349996.0,
"min": 29886.0,
"max": 1349996.0,
"count": 45
},
"Pyramids.Step.sum": {
"value": 1349996.0,
"min": 29886.0,
"max": 1349996.0,
"count": 45
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.7186986804008484,
"min": -0.09877406805753708,
"max": 0.7862449288368225,
"count": 45
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 210.57870483398438,
"min": -23.804550170898438,
"max": 231.9422607421875,
"count": 45
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.020695682615041733,
"min": -0.0020851457957178354,
"max": 0.2536472976207733,
"count": 45
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 6.063835144042969,
"min": -0.5817556977272034,
"max": 61.128997802734375,
"count": 45
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06693182826550555,
"min": 0.06493968514786545,
"max": 0.07417342162008543,
"count": 45
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9370455957170777,
"min": 0.5774562005802386,
"max": 1.0609060056877375,
"count": 45
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.015425430699677143,
"min": 0.0008542959154646646,
"max": 0.01779258903808936,
"count": 45
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.21595602979548,
"min": 0.011960142816505304,
"max": 0.24909624653325102,
"count": 45
},
"Pyramids.Policy.LearningRate.mean": {
"value": 0.00016653106591823095,
"min": 0.00016653106591823095,
"max": 0.000298406025531325,
"count": 45
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0023314349228552335,
"min": 0.0023314349228552335,
"max": 0.003969795176734966,
"count": 45
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.15551034047619047,
"min": 0.15551034047619047,
"max": 0.19946867499999998,
"count": 45
},
"Pyramids.Policy.Epsilon.sum": {
"value": 2.177144766666667,
"min": 1.5957493999999999,
"max": 2.8126043000000003,
"count": 45
},
"Pyramids.Policy.Beta.mean": {
"value": 0.005555483013571428,
"min": 0.005555483013571428,
"max": 0.009946920632500002,
"count": 45
},
"Pyramids.Policy.Beta.sum": {
"value": 0.07777676218999999,
"min": 0.07777676218999999,
"max": 0.13233417682999998,
"count": 45
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.010156959295272827,
"min": 0.010119530372321606,
"max": 0.5349976420402527,
"count": 45
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.14219743013381958,
"min": 0.14219743013381958,
"max": 4.2799811363220215,
"count": 45
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 267.09565217391304,
"min": 250.392,
"max": 994.875,
"count": 45
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30716.0,
"min": 15918.0,
"max": 32390.0,
"count": 45
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.7329043326170548,
"min": -0.9297375520691276,
"max": 1.733606383562088,
"count": 45
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 199.2839982509613,
"min": -30.421601682901382,
"max": 216.700797945261,
"count": 45
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.7329043326170548,
"min": -0.9297375520691276,
"max": 1.733606383562088,
"count": 45
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 199.2839982509613,
"min": -30.421601682901382,
"max": 216.700797945261,
"count": 45
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.028059009661639878,
"min": 0.02772742746845942,
"max": 11.318323723040521,
"count": 45
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.226786111088586,
"min": 2.9391073116566986,
"max": 181.09317956864834,
"count": 45
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 45
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 45
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1695842886",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1695845334"
},
"total": 2448.065191019,
"count": 1,
"self": 0.22107643899971663,
"children": {
"run_training.setup": {
"total": 0.044287387000167655,
"count": 1,
"self": 0.044287387000167655
},
"TrainerController.start_learning": {
"total": 2447.799827193,
"count": 1,
"self": 2.1897277659950305,
"children": {
"TrainerController._reset_env": {
"total": 4.302280407999888,
"count": 1,
"self": 4.302280407999888
},
"TrainerController.advance": {
"total": 2441.203795940005,
"count": 87366,
"self": 2.1587807599480584,
"children": {
"env_step": {
"total": 1603.5822113199972,
"count": 87366,
"self": 1442.938436330009,
"children": {
"SubprocessEnvManager._take_step": {
"total": 159.26421725393243,
"count": 87366,
"self": 6.794734602887502,
"children": {
"TorchPolicy.evaluate": {
"total": 152.46948265104493,
"count": 84876,
"self": 152.46948265104493
}
}
},
"workers": {
"total": 1.3795577360558582,
"count": 87366,
"self": 0.0,
"children": {
"worker_root": {
"total": 2443.927371398999,
"count": 87366,
"is_parallel": true,
"self": 1147.3639511520369,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0021367070000906097,
"count": 1,
"is_parallel": true,
"self": 0.0006627569998727267,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001473950000217883,
"count": 8,
"is_parallel": true,
"self": 0.001473950000217883
}
}
},
"UnityEnvironment.step": {
"total": 0.046885526000096434,
"count": 1,
"is_parallel": true,
"self": 0.00043176100007258356,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0003626850000273407,
"count": 1,
"is_parallel": true,
"self": 0.0003626850000273407
},
"communicator.exchange": {
"total": 0.04411589599999388,
"count": 1,
"is_parallel": true,
"self": 0.04411589599999388
},
"steps_from_proto": {
"total": 0.0019751840000026277,
"count": 1,
"is_parallel": true,
"self": 0.0003668509998533409,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016083330001492868,
"count": 8,
"is_parallel": true,
"self": 0.0016083330001492868
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1296.5634202469623,
"count": 87365,
"is_parallel": true,
"self": 33.59425536292679,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 21.941230666021966,
"count": 87365,
"is_parallel": true,
"self": 21.941230666021966
},
"communicator.exchange": {
"total": 1139.835950148002,
"count": 87365,
"is_parallel": true,
"self": 1139.835950148002
},
"steps_from_proto": {
"total": 101.19198407001159,
"count": 87365,
"is_parallel": true,
"self": 21.147035213946992,
"children": {
"_process_rank_one_or_two_observation": {
"total": 80.0449488560646,
"count": 698920,
"is_parallel": true,
"self": 80.0449488560646
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 835.4628038600595,
"count": 87366,
"self": 4.197077344057561,
"children": {
"process_trajectory": {
"total": 149.8424963540017,
"count": 87366,
"self": 149.62954606600147,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2129502880002292,
"count": 2,
"self": 0.2129502880002292
}
}
},
"_update_policy": {
"total": 681.4232301620002,
"count": 626,
"self": 434.29953350401297,
"children": {
"TorchPPOOptimizer.update": {
"total": 247.12369665798724,
"count": 30960,
"self": 247.12369665798724
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.032999989547534e-06,
"count": 1,
"self": 1.032999989547534e-06
},
"TrainerController._save_models": {
"total": 0.10402204599995457,
"count": 1,
"self": 0.0014076620000196272,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10261438399993494,
"count": 1,
"self": 0.10261438399993494
}
}
}
}
}
}
}