{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.38989493250846863,
"min": 0.38989493250846863,
"max": 1.459467887878418,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 11821.6142578125,
"min": 11744.8359375,
"max": 44274.41796875,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989886.0,
"min": 29952.0,
"max": 989886.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989886.0,
"min": 29952.0,
"max": 989886.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.3971599042415619,
"min": -0.13952460885047913,
"max": 0.3971599042415619,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 105.64453125,
"min": -33.06733322143555,
"max": 105.64453125,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.05755922943353653,
"min": -0.003959978930652142,
"max": 0.23779860138893127,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 15.310754776000977,
"min": -1.0375144481658936,
"max": 56.35826873779297,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06767690789032108,
"min": 0.06467115107404332,
"max": 0.07372450696796481,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9474767104644951,
"min": 0.5160715487757537,
"max": 1.0568009033837977,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.013338869866163892,
"min": 0.00016369380130408785,
"max": 0.015343458799179643,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.18674417812629449,
"min": 0.0018006318143449663,
"max": 0.214808423188515,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.358626118585714e-06,
"min": 7.358626118585714e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0001030207656602,
"min": 0.0001030207656602,
"max": 0.003117604660798499,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10245284285714285,
"min": 0.10245284285714285,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4343397999999998,
"min": 1.3886848,
"max": 2.4018118999999998,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002550390014285715,
"min": 0.0002550390014285715,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0035705460200000008,
"min": 0.0035705460200000008,
"max": 0.10393622984999999,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.012142250314354897,
"min": 0.011802700348198414,
"max": 0.39324983954429626,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.16999150812625885,
"min": 0.16999150812625885,
"max": 2.752748966217041,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 434.5970149253731,
"min": 434.5970149253731,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29118.0,
"min": 15984.0,
"max": 33029.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.4459492341795963,
"min": -1.0000000521540642,
"max": 1.4459492341795963,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 96.87859869003296,
"min": -31.998001664876938,
"max": 100.19379919022322,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.4459492341795963,
"min": -1.0000000521540642,
"max": 1.4459492341795963,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 96.87859869003296,
"min": -31.998001664876938,
"max": 100.19379919022322,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.05406994959162487,
"min": 0.05406994959162487,
"max": 7.976564284414053,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.6226866226388665,
"min": 3.607405538466992,
"max": 127.62502855062485,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1746254014",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.7.0+cu126",
"numpy_version": "1.23.5",
"end_time_seconds": "1746255738"
},
"total": 1724.25962813,
"count": 1,
"self": 0.37215418400001,
"children": {
"run_training.setup": {
"total": 0.020224910000024465,
"count": 1,
"self": 0.020224910000024465
},
"TrainerController.start_learning": {
"total": 1723.867249036,
"count": 1,
"self": 1.5361950000292381,
"children": {
"TrainerController._reset_env": {
"total": 2.3355003340000167,
"count": 1,
"self": 2.3355003340000167
},
"TrainerController.advance": {
"total": 1719.9123633599713,
"count": 63432,
"self": 1.4718718699273268,
"children": {
"env_step": {
"total": 1077.400158871074,
"count": 63432,
"self": 922.9277329039546,
"children": {
"SubprocessEnvManager._take_step": {
"total": 153.5843421890877,
"count": 63432,
"self": 4.532435215021906,
"children": {
"TorchPolicy.evaluate": {
"total": 149.0519069740658,
"count": 62551,
"self": 149.0519069740658
}
}
},
"workers": {
"total": 0.8880837780316142,
"count": 63432,
"self": 0.0,
"children": {
"worker_root": {
"total": 1721.5458141550184,
"count": 63432,
"is_parallel": true,
"self": 894.7023304289614,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.001964313999906153,
"count": 1,
"is_parallel": true,
"self": 0.0006639169996560668,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013003970002500864,
"count": 8,
"is_parallel": true,
"self": 0.0013003970002500864
}
}
},
"UnityEnvironment.step": {
"total": 0.04024367700003495,
"count": 1,
"is_parallel": true,
"self": 0.00035679600023286184,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0003021649999936926,
"count": 1,
"is_parallel": true,
"self": 0.0003021649999936926
},
"communicator.exchange": {
"total": 0.03846786899998733,
"count": 1,
"is_parallel": true,
"self": 0.03846786899998733
},
"steps_from_proto": {
"total": 0.001116846999821064,
"count": 1,
"is_parallel": true,
"self": 0.00024726099991312367,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0008695859999079403,
"count": 8,
"is_parallel": true,
"self": 0.0008695859999079403
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 826.843483726057,
"count": 63431,
"is_parallel": true,
"self": 20.94113800708533,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 14.131560981994653,
"count": 63431,
"is_parallel": true,
"self": 14.131560981994653
},
"communicator.exchange": {
"total": 729.2698708250127,
"count": 63431,
"is_parallel": true,
"self": 729.2698708250127
},
"steps_from_proto": {
"total": 62.50091391196429,
"count": 63431,
"is_parallel": true,
"self": 13.66639323884965,
"children": {
"_process_rank_one_or_two_observation": {
"total": 48.83452067311464,
"count": 507448,
"is_parallel": true,
"self": 48.83452067311464
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 641.0403326189701,
"count": 63432,
"self": 2.740270974936493,
"children": {
"process_trajectory": {
"total": 118.41188175502657,
"count": 63432,
"self": 118.21019919002674,
"children": {
"RLTrainer._checkpoint": {
"total": 0.20168256499982817,
"count": 2,
"self": 0.20168256499982817
}
}
},
"_update_policy": {
"total": 519.888179889007,
"count": 439,
"self": 285.700794121052,
"children": {
"TorchPPOOptimizer.update": {
"total": 234.187385767955,
"count": 22851,
"self": 234.187385767955
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.779996616998687e-07,
"count": 1,
"self": 9.779996616998687e-07
},
"TrainerController._save_models": {
"total": 0.08318936399973609,
"count": 1,
"self": 0.00144750599974941,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08174185799998668,
"count": 1,
"self": 0.08174185799998668
}
}
}
}
}
}
}