{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.1771409660577774,
"min": 0.16918706893920898,
"max": 1.4807435274124146,
"count": 100
},
"Pyramids.Policy.Entropy.sum": {
"value": 5319.8974609375,
"min": 5013.35107421875,
"max": 44919.8359375,
"count": 100
},
"Pyramids.Step.mean": {
"value": 2999943.0,
"min": 29882.0,
"max": 2999943.0,
"count": 100
},
"Pyramids.Step.sum": {
"value": 2999943.0,
"min": 29882.0,
"max": 2999943.0,
"count": 100
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.7463181614875793,
"min": -0.10507097840309143,
"max": 0.8170815706253052,
"count": 100
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 217.1785888671875,
"min": -25.42717742919922,
"max": 246.43836975097656,
"count": 100
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.0003532649134285748,
"min": -0.02730635181069374,
"max": 0.32330048084259033,
"count": 100
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -0.10280008614063263,
"min": -6.853894233703613,
"max": 76.6222152709961,
"count": 100
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06608931241152322,
"min": 0.06373722301837656,
"max": 0.07515891595807817,
"count": 100
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.925250373761325,
"min": 0.49440748292127157,
"max": 1.0914651812054215,
"count": 100
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.01508216666046619,
"min": 7.84764739979794e-05,
"max": 0.015985152054262083,
"count": 100
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.21115033324652666,
"min": 0.0010201941619737323,
"max": 0.22379212875966917,
"count": 100
},
"Pyramids.Policy.LearningRate.mean": {
"value": 1.43513523594048e-06,
"min": 1.43513523594048e-06,
"max": 0.00029838354339596195,
"count": 100
},
"Pyramids.Policy.LearningRate.sum": {
"value": 2.009189330316672e-05,
"min": 2.009189330316672e-05,
"max": 0.0037593475468842,
"count": 100
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10047834523809526,
"min": 0.10047834523809526,
"max": 0.19946118095238097,
"count": 100
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4066968333333336,
"min": 1.3962282666666668,
"max": 2.6925551000000003,
"count": 100
},
"Pyramids.Policy.Beta.mean": {
"value": 5.778668928571442e-05,
"min": 5.778668928571442e-05,
"max": 0.009946171977142856,
"count": 100
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0008090136500000019,
"min": 0.0008090136500000019,
"max": 0.12532626842000003,
"count": 100
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.004908440168946981,
"min": 0.004908440168946981,
"max": 0.4430857002735138,
"count": 100
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.06871816515922546,
"min": 0.06871816515922546,
"max": 3.101599931716919,
"count": 100
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 238.44094488188978,
"min": 217.9624060150376,
"max": 999.0,
"count": 100
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30282.0,
"min": 16809.0,
"max": 33302.0,
"count": 100
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.730053531020645,
"min": -0.9999125516042113,
"max": 1.7693119844198226,
"count": 100
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 219.71679843962193,
"min": -31.997201651334763,
"max": 235.0105982720852,
"count": 100
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.730053531020645,
"min": -0.9999125516042113,
"max": 1.7693119844198226,
"count": 100
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 219.71679843962193,
"min": -31.997201651334763,
"max": 235.0105982720852,
"count": 100
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.012131960569332283,
"min": 0.011887146275582312,
"max": 8.488411870949408,
"count": 100
},
"Pyramids.Policy.RndReward.sum": {
"value": 1.5407589923052,
"min": 1.515698526018241,
"max": 144.30300180613995,
"count": 100
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 100
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 100
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1703814841",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.2+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1703822677"
},
"total": 7836.294732758999,
"count": 1,
"self": 1.1023579819984661,
"children": {
"run_training.setup": {
"total": 0.05537129399999685,
"count": 1,
"self": 0.05537129399999685
},
"TrainerController.start_learning": {
"total": 7835.137003483001,
"count": 1,
"self": 5.21531773508832,
"children": {
"TrainerController._reset_env": {
"total": 2.6266490370001065,
"count": 1,
"self": 2.6266490370001065
},
"TrainerController.advance": {
"total": 7827.163768564912,
"count": 193942,
"self": 5.152197262120353,
"children": {
"env_step": {
"total": 5827.347987723881,
"count": 193942,
"self": 5386.994682803064,
"children": {
"SubprocessEnvManager._take_step": {
"total": 437.19124685380984,
"count": 193942,
"self": 16.052182997008003,
"children": {
"TorchPolicy.evaluate": {
"total": 421.13906385680184,
"count": 187559,
"self": 421.13906385680184
}
}
},
"workers": {
"total": 3.162058067006228,
"count": 193942,
"self": 0.0,
"children": {
"worker_root": {
"total": 7816.583856463998,
"count": 193942,
"is_parallel": true,
"self": 2846.4340181499347,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0022334600000704086,
"count": 1,
"is_parallel": true,
"self": 0.0005726340000364871,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016608260000339214,
"count": 8,
"is_parallel": true,
"self": 0.0016608260000339214
}
}
},
"UnityEnvironment.step": {
"total": 0.09972983500006194,
"count": 1,
"is_parallel": true,
"self": 0.000637282000070627,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004927180000322551,
"count": 1,
"is_parallel": true,
"self": 0.0004927180000322551
},
"communicator.exchange": {
"total": 0.0968893819999721,
"count": 1,
"is_parallel": true,
"self": 0.0968893819999721
},
"steps_from_proto": {
"total": 0.0017104529999869555,
"count": 1,
"is_parallel": true,
"self": 0.0003462560000571102,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013641969999298453,
"count": 8,
"is_parallel": true,
"self": 0.0013641969999298453
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 4970.149838314063,
"count": 193941,
"is_parallel": true,
"self": 115.33656138713923,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 81.50258446312637,
"count": 193941,
"is_parallel": true,
"self": 81.50258446312637
},
"communicator.exchange": {
"total": 4435.365938568792,
"count": 193941,
"is_parallel": true,
"self": 4435.365938568792
},
"steps_from_proto": {
"total": 337.9447538950051,
"count": 193941,
"is_parallel": true,
"self": 70.72036111841612,
"children": {
"_process_rank_one_or_two_observation": {
"total": 267.22439277658896,
"count": 1551528,
"is_parallel": true,
"self": 267.22439277658896
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1994.6635835789107,
"count": 193942,
"self": 9.761564892138495,
"children": {
"process_trajectory": {
"total": 423.8937372067694,
"count": 193942,
"self": 423.17349326877,
"children": {
"RLTrainer._checkpoint": {
"total": 0.7202439379993848,
"count": 6,
"self": 0.7202439379993848
}
}
},
"_update_policy": {
"total": 1561.0082814800028,
"count": 1392,
"self": 933.3656756832008,
"children": {
"TorchPPOOptimizer.update": {
"total": 627.642605796802,
"count": 68334,
"self": 627.642605796802
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.3209992175688967e-06,
"count": 1,
"self": 1.3209992175688967e-06
},
"TrainerController._save_models": {
"total": 0.1312668250011484,
"count": 1,
"self": 0.0023111630016501294,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12895566199949826,
"count": 1,
"self": 0.12895566199949826
}
}
}
}
}
}
}