{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 1.0933833122253418,
"min": 0.9691184163093567,
"max": 1.48665189743042,
"count": 5
},
"Pyramids.Policy.Entropy.sum": {
"value": 33588.734375,
"min": 29027.03515625,
"max": 45099.0703125,
"count": 5
},
"Pyramids.Step.mean": {
"value": 149883.0,
"min": 29989.0,
"max": 149883.0,
"count": 5
},
"Pyramids.Step.sum": {
"value": 149883.0,
"min": 29989.0,
"max": 149883.0,
"count": 5
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.10172443836927414,
"min": -0.20467771589756012,
"max": -0.07863380759954453,
"count": 5
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": -24.515588760375977,
"min": -48.713294982910156,
"max": -18.872114181518555,
"count": 5
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.101700559258461,
"min": 0.101700559258461,
"max": 0.49240267276763916,
"count": 5
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 24.50983428955078,
"min": 24.50983428955078,
"max": 117.19183349609375,
"count": 5
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.0693100763953095,
"min": 0.0671115367143609,
"max": 0.0708894545707194,
"count": 5
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9703410695343331,
"min": 0.5671156365657553,
"max": 0.9703410695343331,
"count": 5
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.0009546671323255992,
"min": 0.0004828064626301551,
"max": 0.008475775385229348,
"count": 5
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.013365339852558389,
"min": 0.006276484014192016,
"max": 0.06780620308183478,
"count": 5
},
"Pyramids.Policy.LearningRate.mean": {
"value": 0.0002865125044958333,
"min": 0.0002865125044958333,
"max": 0.00029840608803130417,
"count": 5
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.004011175062941666,
"min": 0.0023872487042504334,
"max": 0.004011175062941666,
"count": 5
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.19550416666666673,
"min": 0.19550416666666673,
"max": 0.19946869583333338,
"count": 5
},
"Pyramids.Policy.Epsilon.sum": {
"value": 2.737058333333334,
"min": 1.595749566666667,
"max": 2.737058333333334,
"count": 5
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00955086625,
"min": 0.00955086625,
"max": 0.00994692271375,
"count": 5
},
"Pyramids.Policy.Beta.sum": {
"value": 0.1337121275,
"min": 0.07957538171,
"max": 0.1337121275,
"count": 5
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.07708804309368134,
"min": 0.07708804309368134,
"max": 0.577521026134491,
"count": 5
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 1.0792325735092163,
"min": 1.0792325735092163,
"max": 4.620168209075928,
"count": 5
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 980.34375,
"min": 975.8787878787879,
"max": 999.0,
"count": 5
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 31371.0,
"min": 16788.0,
"max": 32493.0,
"count": 5
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": -0.8561812983825803,
"min": -0.9999333851039409,
"max": -0.6736727756532755,
"count": 5
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": -27.39780154824257,
"min": -29.99800155311823,
"max": -14.803400881588459,
"count": 5
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": -0.8561812983825803,
"min": -0.9999333851039409,
"max": -0.6736727756532755,
"count": 5
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": -27.39780154824257,
"min": -29.99800155311823,
"max": -14.803400881588459,
"count": 5
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.8273247933248058,
"min": 0.8273247933248058,
"max": 11.281266773448271,
"count": 5
},
"Pyramids.Policy.RndReward.sum": {
"value": 26.474393386393785,
"min": 26.474393386393785,
"max": 191.7815351486206,
"count": 5
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 5
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 5
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1709318207",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./ml-agents/config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics --force",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.1+cu121",
"numpy_version": "1.21.2",
"end_time_seconds": "1709318789"
},
"total": 581.5632782539997,
"count": 1,
"self": 0.8793220980005572,
"children": {
"run_training.setup": {
"total": 0.07174614599989582,
"count": 1,
"self": 0.07174614599989582
},
"TrainerController.start_learning": {
"total": 580.6122100099992,
"count": 1,
"self": 0.45156186096937745,
"children": {
"TrainerController._reset_env": {
"total": 3.4773280099998374,
"count": 1,
"self": 3.4773280099998374
},
"TrainerController.advance": {
"total": 576.4283412310306,
"count": 10904,
"self": 0.5093080230872147,
"children": {
"env_step": {
"total": 357.76259902799575,
"count": 10904,
"self": 321.9364576010166,
"children": {
"SubprocessEnvManager._take_step": {
"total": 35.53499041099985,
"count": 10904,
"self": 1.4564272749958036,
"children": {
"TorchPolicy.evaluate": {
"total": 34.07856313600405,
"count": 10873,
"self": 34.07856313600405
}
}
},
"workers": {
"total": 0.29115101597926696,
"count": 10903,
"self": 0.0,
"children": {
"worker_root": {
"total": 579.2073278669504,
"count": 10903,
"is_parallel": true,
"self": 291.0540263279463,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0033216040001207148,
"count": 1,
"is_parallel": true,
"self": 0.0012001840013908804,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0021214199987298343,
"count": 8,
"is_parallel": true,
"self": 0.0021214199987298343
}
}
},
"UnityEnvironment.step": {
"total": 0.1084232430002885,
"count": 1,
"is_parallel": true,
"self": 0.0007741730005363934,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0006067179992896854,
"count": 1,
"is_parallel": true,
"self": 0.0006067179992896854
},
"communicator.exchange": {
"total": 0.10467940899980022,
"count": 1,
"is_parallel": true,
"self": 0.10467940899980022
},
"steps_from_proto": {
"total": 0.0023629430006621988,
"count": 1,
"is_parallel": true,
"self": 0.0004908480004814919,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0018720950001807068,
"count": 8,
"is_parallel": true,
"self": 0.0018720950001807068
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 288.15330153900413,
"count": 10902,
"is_parallel": true,
"self": 8.984561922977264,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.353613165942079,
"count": 10902,
"is_parallel": true,
"self": 5.353613165942079
},
"communicator.exchange": {
"total": 248.06178748700495,
"count": 10902,
"is_parallel": true,
"self": 248.06178748700495
},
"steps_from_proto": {
"total": 25.75333896307984,
"count": 10902,
"is_parallel": true,
"self": 5.390808681108865,
"children": {
"_process_rank_one_or_two_observation": {
"total": 20.362530281970976,
"count": 87216,
"is_parallel": true,
"self": 20.362530281970976
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 218.1564341799476,
"count": 10903,
"self": 0.7961376429448137,
"children": {
"process_trajectory": {
"total": 30.787840350000806,
"count": 10903,
"self": 30.787840350000806
},
"_update_policy": {
"total": 186.572456187002,
"count": 70,
"self": 79.90111911802705,
"children": {
"TorchPPOOptimizer.update": {
"total": 106.67133706897494,
"count": 3969,
"self": 106.67133706897494
}
}
}
}
}
}
},
"trainer_threads": {
"total": 2.0799998310394585e-06,
"count": 1,
"self": 2.0799998310394585e-06
},
"TrainerController._save_models": {
"total": 0.25497682799959875,
"count": 1,
"self": 0.00544127099965408,
"children": {
"RLTrainer._checkpoint": {
"total": 0.24953555699994467,
"count": 1,
"self": 0.24953555699994467
}
}
}
}
}
}
}