armargolis's picture
First training of Pyramids
b279908
{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.4438077211380005,
"min": 0.439700186252594,
"max": 1.4210160970687866,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 13207.7177734375,
"min": 13207.7177734375,
"max": 43107.9453125,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989927.0,
"min": 29952.0,
"max": 989927.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989927.0,
"min": 29952.0,
"max": 989927.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.5989148020744324,
"min": -0.11901192367076874,
"max": 0.6260363459587097,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 169.49288940429688,
"min": -28.562862396240234,
"max": 179.04638671875,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.007324466481804848,
"min": -0.005360542330890894,
"max": 0.4421723783016205,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 2.072824001312256,
"min": -1.4902307987213135,
"max": 104.79485321044922,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06749547647528484,
"min": 0.063875554923435,
"max": 0.07354423644004793,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9449366706539877,
"min": 0.4860443941607869,
"max": 1.0328867198955312,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.014507104554684788,
"min": 0.0002813984611494987,
"max": 0.014633990396490009,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.20309946376558702,
"min": 0.003658179994943483,
"max": 0.20899151272897143,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.410997529700002e-06,
"min": 7.410997529700002e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010375396541580003,
"min": 0.00010375396541580003,
"max": 0.0035076407307864997,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.1024703,
"min": 0.1024703,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4345842,
"min": 1.3886848,
"max": 2.5692135000000005,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002567829700000001,
"min": 0.0002567829700000001,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0035949615800000014,
"min": 0.0035949615800000014,
"max": 0.11694442864999999,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.011240273714065552,
"min": 0.011240273714065552,
"max": 0.5911240577697754,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.15736383199691772,
"min": 0.15736383199691772,
"max": 4.137868404388428,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 311.53684210526313,
"min": 304.44565217391306,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29596.0,
"min": 15984.0,
"max": 33192.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6647318955114547,
"min": -1.0000000521540642,
"max": 1.6914730993329838,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 156.48479817807674,
"min": -31.995201662182808,
"max": 161.61699856817722,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6647318955114547,
"min": -1.0000000521540642,
"max": 1.6914730993329838,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 156.48479817807674,
"min": -31.995201662182808,
"max": 161.61699856817722,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.036966517383568334,
"min": 0.03646471410828908,
"max": 12.153205594047904,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.4748526340554236,
"min": 3.3912184120708844,
"max": 194.45128950476646,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1673619615",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1673621989"
},
"total": 2373.475860319,
"count": 1,
"self": 0.48725479999939125,
"children": {
"run_training.setup": {
"total": 0.12300736700035486,
"count": 1,
"self": 0.12300736700035486
},
"TrainerController.start_learning": {
"total": 2372.865598152,
"count": 1,
"self": 1.7313842219164144,
"children": {
"TrainerController._reset_env": {
"total": 6.66675314899976,
"count": 1,
"self": 6.66675314899976
},
"TrainerController.advance": {
"total": 2364.3633372680843,
"count": 63935,
"self": 1.7358568120635027,
"children": {
"env_step": {
"total": 1644.5635284909504,
"count": 63935,
"self": 1514.2617497739361,
"children": {
"SubprocessEnvManager._take_step": {
"total": 129.19573965097106,
"count": 63935,
"self": 5.154243324033359,
"children": {
"TorchPolicy.evaluate": {
"total": 124.0414963269377,
"count": 62575,
"self": 41.13424040887821,
"children": {
"TorchPolicy.sample_actions": {
"total": 82.90725591805949,
"count": 62575,
"self": 82.90725591805949
}
}
}
}
},
"workers": {
"total": 1.106039066043195,
"count": 63935,
"self": 0.0,
"children": {
"worker_root": {
"total": 2367.4840228981275,
"count": 63935,
"is_parallel": true,
"self": 977.4992471950964,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.00202177200026199,
"count": 1,
"is_parallel": true,
"self": 0.0007793200002197409,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001242452000042249,
"count": 8,
"is_parallel": true,
"self": 0.001242452000042249
}
}
},
"UnityEnvironment.step": {
"total": 0.04887443000006897,
"count": 1,
"is_parallel": true,
"self": 0.0005499819999386091,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005530829998861009,
"count": 1,
"is_parallel": true,
"self": 0.0005530829998861009
},
"communicator.exchange": {
"total": 0.04580277700006263,
"count": 1,
"is_parallel": true,
"self": 0.04580277700006263
},
"steps_from_proto": {
"total": 0.001968588000181626,
"count": 1,
"is_parallel": true,
"self": 0.0004980620001333591,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014705260000482667,
"count": 8,
"is_parallel": true,
"self": 0.0014705260000482667
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1389.9847757030311,
"count": 63934,
"is_parallel": true,
"self": 31.71018709498003,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 26.284027293957024,
"count": 63934,
"is_parallel": true,
"self": 26.284027293957024
},
"communicator.exchange": {
"total": 1212.3340886920287,
"count": 63934,
"is_parallel": true,
"self": 1212.3340886920287
},
"steps_from_proto": {
"total": 119.6564726220654,
"count": 63934,
"is_parallel": true,
"self": 27.687992739111905,
"children": {
"_process_rank_one_or_two_observation": {
"total": 91.96847988295349,
"count": 511472,
"is_parallel": true,
"self": 91.96847988295349
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 718.0639519650704,
"count": 63935,
"self": 3.222790625869493,
"children": {
"process_trajectory": {
"total": 165.22215052620595,
"count": 63935,
"self": 164.9975151352064,
"children": {
"RLTrainer._checkpoint": {
"total": 0.22463539099953778,
"count": 2,
"self": 0.22463539099953778
}
}
},
"_update_policy": {
"total": 549.6190108129949,
"count": 449,
"self": 209.30618848697986,
"children": {
"TorchPPOOptimizer.update": {
"total": 340.31282232601507,
"count": 22821,
"self": 340.31282232601507
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0049998309114017e-06,
"count": 1,
"self": 1.0049998309114017e-06
},
"TrainerController._save_models": {
"total": 0.10412250799981848,
"count": 1,
"self": 0.0014301580004030257,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10269234999941546,
"count": 1,
"self": 0.10269234999941546
}
}
}
}
}
}
}