{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.43180933594703674,
"min": 0.43126070499420166,
"max": 1.5841691493988037,
"count": 40
},
"Pyramids.Policy.Entropy.sum": {
"value": 13057.9140625,
"min": 12861.9189453125,
"max": 48057.35546875,
"count": 40
},
"Pyramids.Step.mean": {
"value": 1199891.0,
"min": 29952.0,
"max": 1199891.0,
"count": 40
},
"Pyramids.Step.sum": {
"value": 1199891.0,
"min": 29952.0,
"max": 1199891.0,
"count": 40
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.4963719844818115,
"min": -0.09791506081819534,
"max": 0.4963719844818115,
"count": 40
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 135.50955200195312,
"min": -23.597530364990234,
"max": 135.50955200195312,
"count": 40
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.0096103735268116,
"min": -0.0096103735268116,
"max": 0.1173042431473732,
"count": 40
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -2.6236319541931152,
"min": -2.6236319541931152,
"max": 28.153018951416016,
"count": 40
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.04931616802267464,
"min": 0.04450815466615105,
"max": 0.052197964594438026,
"count": 40
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.3452131761587225,
"min": 0.20525118841958995,
"max": 0.3985707801184617,
"count": 40
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.014306249213404953,
"min": 0.00017832834793734214,
"max": 0.014403587880743933,
"count": 40
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.10014374449383467,
"min": 0.0008916417396867106,
"max": 0.10336133686359972,
"count": 40
},
"Pyramids.Policy.LearningRate.mean": {
"value": 3.6113845105238155e-06,
"min": 3.6113845105238155e-06,
"max": 0.000296208001264,
"count": 40
},
"Pyramids.Policy.LearningRate.sum": {
"value": 2.5279691573666707e-05,
"min": 2.5279691573666707e-05,
"max": 0.0018137743454085832,
"count": 40
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.1012037619047619,
"min": 0.1012037619047619,
"max": 0.19873600000000002,
"count": 40
},
"Pyramids.Policy.Epsilon.sum": {
"value": 0.7084263333333333,
"min": 0.7084263333333333,
"max": 1.3101369166666668,
"count": 40
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00013025581428571448,
"min": 0.00013025581428571448,
"max": 0.009873726399999998,
"count": 40
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0009117907000000014,
"min": 0.0009117907000000014,
"max": 0.060468682525,
"count": 40
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.008115310221910477,
"min": 0.007542174309492111,
"max": 0.18715202808380127,
"count": 40
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.05680717155337334,
"min": 0.05279522016644478,
"max": 0.7486081123352051,
"count": 40
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 383.4230769230769,
"min": 383.4230769230769,
"max": 999.0,
"count": 40
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29907.0,
"min": 15984.0,
"max": 33086.0,
"count": 40
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.5396256252932243,
"min": -1.0000000521540642,
"max": 1.5396256252932243,
"count": 40
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 120.0907987728715,
"min": -32.000001668930054,
"max": 120.0907987728715,
"count": 40
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.5396256252932243,
"min": -1.0000000521540642,
"max": 1.5396256252932243,
"count": 40
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 120.0907987728715,
"min": -32.000001668930054,
"max": 120.0907987728715,
"count": 40
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.03217785571140643,
"min": 0.03217785571140643,
"max": 7.243808610364795,
"count": 40
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.5098727454897016,
"min": 2.2504812776460312,
"max": 115.90093776583672,
"count": 40
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1673448733",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1673451049"
},
"total": 2316.370975376,
"count": 1,
"self": 0.6260153549997085,
"children": {
"run_training.setup": {
"total": 0.10593957699984458,
"count": 1,
"self": 0.10593957699984458
},
"TrainerController.start_learning": {
"total": 2315.6390204440004,
"count": 1,
"self": 1.4057620270305051,
"children": {
"TrainerController._reset_env": {
"total": 6.388285214000007,
"count": 1,
"self": 6.388285214000007
},
"TrainerController.advance": {
"total": 2307.7510039419694,
"count": 76259,
"self": 1.4356994739641777,
"children": {
"env_step": {
"total": 1637.0692565220188,
"count": 76259,
"self": 1515.5732718749987,
"children": {
"SubprocessEnvManager._take_step": {
"total": 120.61312878302147,
"count": 76259,
"self": 4.999279493094718,
"children": {
"TorchPolicy.evaluate": {
"total": 115.61384928992675,
"count": 75068,
"self": 38.78389159491121,
"children": {
"TorchPolicy.sample_actions": {
"total": 76.82995769501554,
"count": 75068,
"self": 76.82995769501554
}
}
}
}
},
"workers": {
"total": 0.8828558639986568,
"count": 76259,
"self": 0.0,
"children": {
"worker_root": {
"total": 2310.5169555260563,
"count": 76259,
"is_parallel": true,
"self": 909.5085581379908,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0016701409999768657,
"count": 1,
"is_parallel": true,
"self": 0.0005794080000214308,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001090732999955435,
"count": 8,
"is_parallel": true,
"self": 0.001090732999955435
}
}
},
"UnityEnvironment.step": {
"total": 0.045528187999934744,
"count": 1,
"is_parallel": true,
"self": 0.0005240550001417432,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.000475739999956204,
"count": 1,
"is_parallel": true,
"self": 0.000475739999956204
},
"communicator.exchange": {
"total": 0.042888253000000987,
"count": 1,
"is_parallel": true,
"self": 0.042888253000000987
},
"steps_from_proto": {
"total": 0.0016401399998358102,
"count": 1,
"is_parallel": true,
"self": 0.00048265899999933026,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00115748099983648,
"count": 8,
"is_parallel": true,
"self": 0.00115748099983648
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1401.0083973880655,
"count": 76258,
"is_parallel": true,
"self": 32.891922812191524,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 25.949304434957867,
"count": 76258,
"is_parallel": true,
"self": 25.949304434957867
},
"communicator.exchange": {
"total": 1224.2630952949764,
"count": 76258,
"is_parallel": true,
"self": 1224.2630952949764
},
"steps_from_proto": {
"total": 117.9040748459397,
"count": 76258,
"is_parallel": true,
"self": 25.372826467717005,
"children": {
"_process_rank_one_or_two_observation": {
"total": 92.5312483782227,
"count": 610064,
"is_parallel": true,
"self": 92.5312483782227
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 669.2460479459864,
"count": 76259,
"self": 2.581488622969573,
"children": {
"process_trajectory": {
"total": 169.37972792301798,
"count": 76259,
"self": 169.18406680901785,
"children": {
"RLTrainer._checkpoint": {
"total": 0.19566111400013142,
"count": 2,
"self": 0.19566111400013142
}
}
},
"_update_policy": {
"total": 497.2848313999989,
"count": 275,
"self": 234.75314104901554,
"children": {
"TorchPPOOptimizer.update": {
"total": 262.53169035098335,
"count": 13821,
"self": 262.53169035098335
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.800002433417831e-07,
"count": 1,
"self": 8.800002433417831e-07
},
"TrainerController._save_models": {
"total": 0.09396838100019522,
"count": 1,
"self": 0.001431187999969552,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09253719300022567,
"count": 1,
"self": 0.09253719300022567
}
}
}
}
}
}
}