{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.38749825954437256,
"min": 0.38617178797721863,
"max": 1.4779518842697144,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 11550.5478515625,
"min": 11550.5478515625,
"max": 44835.1484375,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989899.0,
"min": 29908.0,
"max": 989899.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989899.0,
"min": 29908.0,
"max": 989899.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.37798383831977844,
"min": -0.09477484226226807,
"max": 0.4683753252029419,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 101.29966735839844,
"min": -22.840736389160156,
"max": 126.46133422851562,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.0241877231746912,
"min": -0.031342942267656326,
"max": 0.2993612289428711,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -6.482309818267822,
"min": -8.462594032287598,
"max": 70.9486083984375,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07015097327772915,
"min": 0.06414965563355765,
"max": 0.07358444599493918,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9821136258882082,
"min": 0.4490475894349036,
"max": 1.1001935511548984,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.013804367392454187,
"min": 0.0005465998558006482,
"max": 0.01438293275423348,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.1932611434943586,
"min": 0.004919398702205834,
"max": 0.21574399131350222,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.482118934564283e-06,
"min": 7.482118934564283e-06,
"max": 0.0002952348015884,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010474966508389996,
"min": 0.00010474966508389996,
"max": 0.0035078921307027,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10249400714285715,
"min": 0.10249400714285715,
"max": 0.1984116,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4349161000000001,
"min": 1.3888812,
"max": 2.5692973,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002591513135714285,
"min": 0.0002591513135714285,
"max": 0.00984131884,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0036281183899999985,
"min": 0.0036281183899999985,
"max": 0.11695280027,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.011420076712965965,
"min": 0.011420076712965965,
"max": 0.5271530747413635,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.15988107025623322,
"min": 0.15988107025623322,
"max": 3.6900713443756104,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 467.7971014492754,
"min": 401.64383561643837,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 32278.0,
"min": 16707.0,
"max": 32278.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.2712347583062407,
"min": -1.0000000521540642,
"max": 1.486647345517811,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 87.71519832313061,
"min": -32.000001668930054,
"max": 112.98519825935364,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.2712347583062407,
"min": -1.0000000521540642,
"max": 1.486647345517811,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 87.71519832313061,
"min": -32.000001668930054,
"max": 112.98519825935364,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.05505368986801393,
"min": 0.048350382187522516,
"max": 10.450664348462048,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.798704600892961,
"min": 3.577928281876666,
"max": 177.66129392385483,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1747904210",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.7.0+cu126",
"numpy_version": "1.23.5",
"end_time_seconds": "1747907275"
},
"total": 3064.326325808,
"count": 1,
"self": 0.5930943060006939,
"children": {
"run_training.setup": {
"total": 0.036050697999598924,
"count": 1,
"self": 0.036050697999598924
},
"TrainerController.start_learning": {
"total": 3063.697180804,
"count": 1,
"self": 2.413947020057094,
"children": {
"TrainerController._reset_env": {
"total": 4.522547317999852,
"count": 1,
"self": 4.522547317999852
},
"TrainerController.advance": {
"total": 3056.676068109943,
"count": 63762,
"self": 2.54273445504532,
"children": {
"env_step": {
"total": 2019.632045587955,
"count": 63762,
"self": 1861.8729795300887,
"children": {
"SubprocessEnvManager._take_step": {
"total": 156.35573263386868,
"count": 63762,
"self": 6.8219189418614405,
"children": {
"TorchPolicy.evaluate": {
"total": 149.53381369200724,
"count": 62569,
"self": 149.53381369200724
}
}
},
"workers": {
"total": 1.4033334239975375,
"count": 63762,
"self": 0.0,
"children": {
"worker_root": {
"total": 3056.4151284029163,
"count": 63762,
"is_parallel": true,
"self": 1366.5519604488477,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.003977787999701832,
"count": 1,
"is_parallel": true,
"self": 0.0015183109994723054,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0024594770002295263,
"count": 8,
"is_parallel": true,
"self": 0.0024594770002295263
}
}
},
"UnityEnvironment.step": {
"total": 0.22586353599990616,
"count": 1,
"is_parallel": true,
"self": 0.004402599999593804,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0007351910003308149,
"count": 1,
"is_parallel": true,
"self": 0.0007351910003308149
},
"communicator.exchange": {
"total": 0.19935630000009041,
"count": 1,
"is_parallel": true,
"self": 0.19935630000009041
},
"steps_from_proto": {
"total": 0.02136944499989113,
"count": 1,
"is_parallel": true,
"self": 0.005143133999808924,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.016226311000082205,
"count": 8,
"is_parallel": true,
"self": 0.016226311000082205
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1689.8631679540686,
"count": 63761,
"is_parallel": true,
"self": 45.12232580395721,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 30.770029259028433,
"count": 63761,
"is_parallel": true,
"self": 30.770029259028433
},
"communicator.exchange": {
"total": 1487.2160940550184,
"count": 63761,
"is_parallel": true,
"self": 1487.2160940550184
},
"steps_from_proto": {
"total": 126.75471883606451,
"count": 63761,
"is_parallel": true,
"self": 27.568633068331565,
"children": {
"_process_rank_one_or_two_observation": {
"total": 99.18608576773295,
"count": 510088,
"is_parallel": true,
"self": 99.18608576773295
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1034.5012880669428,
"count": 63762,
"self": 4.350158363921764,
"children": {
"process_trajectory": {
"total": 161.9294855840185,
"count": 63762,
"self": 161.6172658900191,
"children": {
"RLTrainer._checkpoint": {
"total": 0.31221969399939553,
"count": 2,
"self": 0.31221969399939553
}
}
},
"_update_policy": {
"total": 868.2216441190026,
"count": 450,
"self": 357.2702732719622,
"children": {
"TorchPPOOptimizer.update": {
"total": 510.9513708470404,
"count": 22812,
"self": 510.9513708470404
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0410003596916795e-06,
"count": 1,
"self": 1.0410003596916795e-06
},
"TrainerController._save_models": {
"total": 0.08461731499937741,
"count": 1,
"self": 0.001822034999349853,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08279528000002756,
"count": 1,
"self": 0.08279528000002756
}
}
}
}
}
}
}