{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.659187912940979,
"min": 0.6173156499862671,
"max": 1.4114888906478882,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 19912.748046875,
"min": 18420.69921875,
"max": 42818.92578125,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989965.0,
"min": 29920.0,
"max": 989965.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989965.0,
"min": 29920.0,
"max": 989965.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.1626151204109192,
"min": -0.1080414354801178,
"max": 0.1626151204109192,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 41.141624450683594,
"min": -26.037986755371094,
"max": 41.141624450683594,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 1.6712524890899658,
"min": -0.030549194663763046,
"max": 1.6712524890899658,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 422.8268737792969,
"min": -7.667847633361816,
"max": 422.8268737792969,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06901794078392015,
"min": 0.06467967473791289,
"max": 0.07363861727630608,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9662511709748821,
"min": 0.5891089382104486,
"max": 1.0328101998870718,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.6948818891074089,
"min": 0.00013090192973598186,
"max": 0.6948818891074089,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 9.728346447503725,
"min": 0.0018326270163037462,
"max": 9.728346447503725,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.662783160057142e-06,
"min": 7.662783160057142e-06,
"max": 0.000294853726715425,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0001072789642408,
"min": 0.0001072789642408,
"max": 0.00337623017459,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10255422857142858,
"min": 0.10255422857142858,
"max": 0.19828457500000002,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4357592000000001,
"min": 1.4357592000000001,
"max": 2.4441787,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00026516743428571434,
"min": 0.00026516743428571434,
"max": 0.009828629042499998,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0037123440800000008,
"min": 0.0037123440800000008,
"max": 0.112558459,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.013696732930839062,
"min": 0.013696732930839062,
"max": 0.42166799306869507,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.1917542666196823,
"min": 0.1917542666196823,
"max": 3.3733439445495605,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 706.6428571428571,
"min": 696.609756097561,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29679.0,
"min": 16974.0,
"max": 32650.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 0.626523764892703,
"min": -0.9997750516049564,
"max": 0.65857329401705,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 26.313998125493526,
"min": -31.992801651358604,
"max": 29.63579823076725,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 0.626523764892703,
"min": -0.9997750516049564,
"max": 0.65857329401705,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 26.313998125493526,
"min": -31.992801651358604,
"max": 29.63579823076725,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.10075437982838291,
"min": 0.10075437982838291,
"max": 7.98027394256658,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 4.231683952792082,
"min": 4.165264769690111,
"max": 143.64493096619844,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1703065503",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.2+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1703067752"
},
"total": 2248.79340017,
"count": 1,
"self": 0.9867051340002035,
"children": {
"run_training.setup": {
"total": 0.047504747999937535,
"count": 1,
"self": 0.047504747999937535
},
"TrainerController.start_learning": {
"total": 2247.759190288,
"count": 1,
"self": 1.6674473260336526,
"children": {
"TrainerController._reset_env": {
"total": 2.264622074000272,
"count": 1,
"self": 2.264622074000272
},
"TrainerController.advance": {
"total": 2243.6978821739667,
"count": 63213,
"self": 1.6301510930070435,
"children": {
"env_step": {
"total": 1602.5680590609932,
"count": 63213,
"self": 1458.6665379571232,
"children": {
"SubprocessEnvManager._take_step": {
"total": 142.90357641197352,
"count": 63213,
"self": 5.14835664994871,
"children": {
"TorchPolicy.evaluate": {
"total": 137.7552197620248,
"count": 62546,
"self": 137.7552197620248
}
}
},
"workers": {
"total": 0.9979446918964641,
"count": 63213,
"self": 0.0,
"children": {
"worker_root": {
"total": 2242.175577025978,
"count": 63213,
"is_parallel": true,
"self": 913.561316739946,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0017532130000290636,
"count": 1,
"is_parallel": true,
"self": 0.0005398879993663286,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001213325000662735,
"count": 8,
"is_parallel": true,
"self": 0.001213325000662735
}
}
},
"UnityEnvironment.step": {
"total": 0.07721136999998635,
"count": 1,
"is_parallel": true,
"self": 0.0006264830003601674,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005295699997986958,
"count": 1,
"is_parallel": true,
"self": 0.0005295699997986958
},
"communicator.exchange": {
"total": 0.07453059599993139,
"count": 1,
"is_parallel": true,
"self": 0.07453059599993139
},
"steps_from_proto": {
"total": 0.0015247209998960898,
"count": 1,
"is_parallel": true,
"self": 0.0002969679999296204,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012277529999664694,
"count": 8,
"is_parallel": true,
"self": 0.0012277529999664694
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1328.6142602860318,
"count": 63212,
"is_parallel": true,
"self": 36.39443299213008,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 26.39216007598725,
"count": 63212,
"is_parallel": true,
"self": 26.39216007598725
},
"communicator.exchange": {
"total": 1159.6169457130236,
"count": 63212,
"is_parallel": true,
"self": 1159.6169457130236
},
"steps_from_proto": {
"total": 106.2107215048909,
"count": 63212,
"is_parallel": true,
"self": 22.016268685833438,
"children": {
"_process_rank_one_or_two_observation": {
"total": 84.19445281905746,
"count": 505696,
"is_parallel": true,
"self": 84.19445281905746
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 639.4996720199665,
"count": 63213,
"self": 2.960547474011946,
"children": {
"process_trajectory": {
"total": 130.12816760195255,
"count": 63213,
"self": 129.88963012095337,
"children": {
"RLTrainer._checkpoint": {
"total": 0.23853748099918448,
"count": 2,
"self": 0.23853748099918448
}
}
},
"_update_policy": {
"total": 506.410956944002,
"count": 448,
"self": 300.8513082270051,
"children": {
"TorchPPOOptimizer.update": {
"total": 205.5596487169969,
"count": 22764,
"self": 205.5596487169969
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.3439994290820323e-06,
"count": 1,
"self": 1.3439994290820323e-06
},
"TrainerController._save_models": {
"total": 0.12923736999982793,
"count": 1,
"self": 0.0022860010003569187,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12695136899947101,
"count": 1,
"self": 0.12695136899947101
}
}
}
}
}
}
}