{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.6906391978263855,
"min": 0.6906391978263855,
"max": 1.4951658248901367,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 20730.2265625,
"min": 20730.2265625,
"max": 45357.3515625,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989978.0,
"min": 29952.0,
"max": 989978.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989978.0,
"min": 29952.0,
"max": 989978.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.27455276250839233,
"min": -0.11421945691108704,
"max": 0.3237708806991577,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 70.83460998535156,
"min": -27.52688980102539,
"max": 84.50419616699219,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.02057463303208351,
"min": 0.0064631132408976555,
"max": 0.29000282287597656,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 5.308255195617676,
"min": 1.6868726015090942,
"max": 68.73066711425781,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06699241352826052,
"min": 0.0656680480939903,
"max": 0.07335828341116818,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.0048862029239078,
"min": 0.46529204374341154,
"max": 1.1003742511675227,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.00920045412552343,
"min": 0.0003991615375175802,
"max": 0.011246829549712683,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.13800681188285147,
"min": 0.0035924538376582217,
"max": 0.1630658896707852,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.477417507560001e-06,
"min": 7.477417507560001e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00011216126261340002,
"min": 0.00011216126261340002,
"max": 0.0032538758153748,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10249244,
"min": 0.10249244,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.5373866,
"min": 1.3886848,
"max": 2.5274535000000005,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00025899475600000014,
"min": 0.00025899475600000014,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003884921340000002,
"min": 0.003884921340000002,
"max": 0.10848405748000002,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.009754347614943981,
"min": 0.009754347614943981,
"max": 0.2952248156070709,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.14631521701812744,
"min": 0.13918960094451904,
"max": 2.0665736198425293,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 574.7058823529412,
"min": 501.08620689655174,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29310.0,
"min": 15984.0,
"max": 32064.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.010996133662187,
"min": -1.0000000521540642,
"max": 1.2574551498581623,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 52.57179895043373,
"min": -31.99920167028904,
"max": 72.93239869177341,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.010996133662187,
"min": -1.0000000521540642,
"max": 1.2574551498581623,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 52.57179895043373,
"min": -31.99920167028904,
"max": 72.93239869177341,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.05742890287505221,
"min": 0.05648217876421705,
"max": 5.640241575893015,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.986302949502715,
"min": 2.986302949502715,
"max": 90.24386521428823,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1707091623",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training 3 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.0+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1707093642"
},
"total": 2019.2278016229998,
"count": 1,
"self": 0.4919788150000386,
"children": {
"run_training.setup": {
"total": 0.04893460799996774,
"count": 1,
"self": 0.04893460799996774
},
"TrainerController.start_learning": {
"total": 2018.6868881999999,
"count": 1,
"self": 1.3881859550233457,
"children": {
"TrainerController._reset_env": {
"total": 3.5434164409999767,
"count": 1,
"self": 3.5434164409999767
},
"TrainerController.advance": {
"total": 2013.6664307699762,
"count": 63323,
"self": 1.4140257250232935,
"children": {
"env_step": {
"total": 1401.5376707810005,
"count": 63323,
"self": 1267.18659548997,
"children": {
"SubprocessEnvManager._take_step": {
"total": 133.49414123700717,
"count": 63323,
"self": 4.765669864033725,
"children": {
"TorchPolicy.evaluate": {
"total": 128.72847137297344,
"count": 62565,
"self": 128.72847137297344
}
}
},
"workers": {
"total": 0.8569340540234407,
"count": 63323,
"self": 0.0,
"children": {
"worker_root": {
"total": 2013.5183327320265,
"count": 63323,
"is_parallel": true,
"self": 863.6075987310046,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005404927999961728,
"count": 1,
"is_parallel": true,
"self": 0.0035062549999338444,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001898673000027884,
"count": 8,
"is_parallel": true,
"self": 0.001898673000027884
}
}
},
"UnityEnvironment.step": {
"total": 0.06259626900009607,
"count": 1,
"is_parallel": true,
"self": 0.0006871919999866805,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005273060000945407,
"count": 1,
"is_parallel": true,
"self": 0.0005273060000945407
},
"communicator.exchange": {
"total": 0.05953579100003026,
"count": 1,
"is_parallel": true,
"self": 0.05953579100003026
},
"steps_from_proto": {
"total": 0.0018459799999845927,
"count": 1,
"is_parallel": true,
"self": 0.00037511799996536865,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001470862000019224,
"count": 8,
"is_parallel": true,
"self": 0.001470862000019224
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1149.9107340010219,
"count": 63322,
"is_parallel": true,
"self": 35.72028183412158,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 25.723027451930875,
"count": 63322,
"is_parallel": true,
"self": 25.723027451930875
},
"communicator.exchange": {
"total": 987.1775453110225,
"count": 63322,
"is_parallel": true,
"self": 987.1775453110225
},
"steps_from_proto": {
"total": 101.28987940394677,
"count": 63322,
"is_parallel": true,
"self": 20.37958872600234,
"children": {
"_process_rank_one_or_two_observation": {
"total": 80.91029067794443,
"count": 506576,
"is_parallel": true,
"self": 80.91029067794443
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 610.7147342639523,
"count": 63323,
"self": 2.6287915809639344,
"children": {
"process_trajectory": {
"total": 123.11211411799331,
"count": 63323,
"self": 122.91227023899341,
"children": {
"RLTrainer._checkpoint": {
"total": 0.19984387899989997,
"count": 2,
"self": 0.19984387899989997
}
}
},
"_update_policy": {
"total": 484.9738285649951,
"count": 443,
"self": 284.1170614180311,
"children": {
"TorchPPOOptimizer.update": {
"total": 200.856767146964,
"count": 22794,
"self": 200.856767146964
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.6080002751550637e-06,
"count": 1,
"self": 1.6080002751550637e-06
},
"TrainerController._save_models": {
"total": 0.0888534260002416,
"count": 1,
"self": 0.0013717900001211092,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08748163600012049,
"count": 1,
"self": 0.08748163600012049
}
}
}
}
}
}
}