{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.2940797507762909,
"min": 0.29177945852279663,
"max": 1.5266907215118408,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 8704.7607421875,
"min": 8704.7607421875,
"max": 46313.69140625,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989942.0,
"min": 29952.0,
"max": 989942.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989942.0,
"min": 29952.0,
"max": 989942.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.5738489627838135,
"min": -0.1804431676864624,
"max": 0.6163069605827332,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 161.82540893554688,
"min": -42.76502990722656,
"max": 174.4148712158203,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.010815862566232681,
"min": -0.015650028362870216,
"max": 0.22419953346252441,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 3.0500731468200684,
"min": -4.428957939147949,
"max": 54.03208923339844,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06780801963500886,
"min": 0.06555721167934146,
"max": 0.0749624230049362,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.017120294525133,
"min": 0.5044945766376572,
"max": 1.0534422545883275,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.014588954404462128,
"min": 0.0008508402200227301,
"max": 0.016877641807941327,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.21883431606693193,
"min": 0.008508402200227302,
"max": 0.2362869853111786,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.524897491733334e-06,
"min": 7.524897491733334e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00011287346237600001,
"min": 0.00011287346237600001,
"max": 0.0035073320308893997,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10250826666666667,
"min": 0.10250826666666667,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.537624,
"min": 1.3886848,
"max": 2.5691106,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002605758400000001,
"min": 0.0002605758400000001,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0039086376000000015,
"min": 0.0039086376000000015,
"max": 0.11693414894000001,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.015421492978930473,
"min": 0.015421492978930473,
"max": 0.4567631185054779,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.23132239282131195,
"min": 0.2239353060722351,
"max": 3.1973419189453125,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 326.8404255319149,
"min": 319.3404255319149,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30723.0,
"min": 15984.0,
"max": 32963.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.630333312256362,
"min": -1.0000000521540642,
"max": 1.6567612795099136,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 151.62099803984165,
"min": -31.99920167028904,
"max": 154.07879899442196,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.630333312256362,
"min": -1.0000000521540642,
"max": 1.6567612795099136,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 151.62099803984165,
"min": -31.99920167028904,
"max": 154.07879899442196,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.05173430486922703,
"min": 0.05173430486922703,
"max": 8.840237027965486,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 4.811290352838114,
"min": 4.707263820106164,
"max": 141.44379244744778,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1752183752",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.7.1+cu126",
"numpy_version": "1.23.5",
"end_time_seconds": "1752185970"
},
"total": 2217.8136008310003,
"count": 1,
"self": 0.6113388430003397,
"children": {
"run_training.setup": {
"total": 0.021933667000041623,
"count": 1,
"self": 0.021933667000041623
},
"TrainerController.start_learning": {
"total": 2217.1803283209997,
"count": 1,
"self": 1.3017846129901045,
"children": {
"TrainerController._reset_env": {
"total": 3.20038402900002,
"count": 1,
"self": 3.20038402900002
},
"TrainerController.advance": {
"total": 2212.5938138870097,
"count": 63927,
"self": 1.4012302340447604,
"children": {
"env_step": {
"total": 1551.2036206459402,
"count": 63927,
"self": 1402.9065426639363,
"children": {
"SubprocessEnvManager._take_step": {
"total": 147.51713223103047,
"count": 63927,
"self": 4.527033846041718,
"children": {
"TorchPolicy.evaluate": {
"total": 142.99009838498876,
"count": 62569,
"self": 142.99009838498876
}
}
},
"workers": {
"total": 0.7799457509732974,
"count": 63927,
"self": 0.0,
"children": {
"worker_root": {
"total": 2212.0106330240365,
"count": 63927,
"is_parallel": true,
"self": 920.6861952390079,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005082674000050247,
"count": 1,
"is_parallel": true,
"self": 0.003551874000152111,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015307999998981359,
"count": 8,
"is_parallel": true,
"self": 0.0015307999998981359
}
}
},
"UnityEnvironment.step": {
"total": 0.10255239499997515,
"count": 1,
"is_parallel": true,
"self": 0.0005652109999800814,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004415379999613833,
"count": 1,
"is_parallel": true,
"self": 0.0004415379999613833
},
"communicator.exchange": {
"total": 0.0997881200000279,
"count": 1,
"is_parallel": true,
"self": 0.0997881200000279
},
"steps_from_proto": {
"total": 0.0017575260000057824,
"count": 1,
"is_parallel": true,
"self": 0.0003621160000761847,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013954099999295977,
"count": 8,
"is_parallel": true,
"self": 0.0013954099999295977
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1291.3244377850285,
"count": 63926,
"is_parallel": true,
"self": 32.3906772240407,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 22.91195796497334,
"count": 63926,
"is_parallel": true,
"self": 22.91195796497334
},
"communicator.exchange": {
"total": 1139.8625725100064,
"count": 63926,
"is_parallel": true,
"self": 1139.8625725100064
},
"steps_from_proto": {
"total": 96.15923008600811,
"count": 63926,
"is_parallel": true,
"self": 19.080573586083517,
"children": {
"_process_rank_one_or_two_observation": {
"total": 77.07865649992459,
"count": 511408,
"is_parallel": true,
"self": 77.07865649992459
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 659.9889630070247,
"count": 63927,
"self": 2.5177483249825627,
"children": {
"process_trajectory": {
"total": 125.05152368904226,
"count": 63927,
"self": 124.76706515004219,
"children": {
"RLTrainer._checkpoint": {
"total": 0.28445853900007023,
"count": 2,
"self": 0.28445853900007023
}
}
},
"_update_policy": {
"total": 532.4196909929999,
"count": 448,
"self": 299.0217156079895,
"children": {
"TorchPPOOptimizer.update": {
"total": 233.3979753850104,
"count": 22818,
"self": 233.3979753850104
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.66999778029276e-07,
"count": 1,
"self": 9.66999778029276e-07
},
"TrainerController._save_models": {
"total": 0.08434482500024387,
"count": 1,
"self": 0.0012525719998848217,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08309225300035905,
"count": 1,
"self": 0.08309225300035905
}
}
}
}
}
}
}