{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.402489185333252,
"min": 1.402489185333252,
"max": 1.4242887496948242,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 70867.78125,
"min": 69347.4296875,
"max": 76460.0,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 94.11026615969581,
"min": 81.07060755336617,
"max": 381.0381679389313,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49502.0,
"min": 48829.0,
"max": 50070.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999992.0,
"min": 49490.0,
"max": 1999992.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999992.0,
"min": 49490.0,
"max": 1999992.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.4200994968414307,
"min": -0.007794059347361326,
"max": 2.4511916637420654,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1272.9722900390625,
"min": -1.0132277011871338,
"max": 1463.220703125,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.7170331112105583,
"min": 1.668142564021624,
"max": 3.8940764688238314,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1955.1594164967537,
"min": 216.85853332281113,
"max": 2327.3433454036713,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.7170331112105583,
"min": 1.668142564021624,
"max": 3.8940764688238314,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1955.1594164967537,
"min": 216.85853332281113,
"max": 2327.3433454036713,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.01707499316932323,
"min": 0.013570245574373984,
"max": 0.020202330841857477,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.051224979507969684,
"min": 0.027140491148747968,
"max": 0.058712793236675984,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05690838740103774,
"min": 0.024145034297058977,
"max": 0.05958599249521891,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.17072516220311323,
"min": 0.048290068594117955,
"max": 0.17309982900818188,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.226498924533341e-06,
"min": 3.226498924533341e-06,
"max": 0.0002953322265559249,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 9.679496773600022e-06,
"min": 9.679496773600022e-06,
"max": 0.0008441391186202998,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10107546666666667,
"min": 0.10107546666666667,
"max": 0.19844407499999997,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.3032264,
"min": 0.20730389999999999,
"max": 0.5813797,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.366578666666679e-05,
"min": 6.366578666666679e-05,
"max": 0.0049223593425,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00019099736000000037,
"min": 0.00019099736000000037,
"max": 0.014070847029999998,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1716759027",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy2 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1716759700"
},
"total": 673.1599711939998,
"count": 1,
"self": 0.1167667719983001,
"children": {
"run_training.setup": {
"total": 0.01983320200088201,
"count": 1,
"self": 0.01983320200088201
},
"TrainerController.start_learning": {
"total": 673.0233712200006,
"count": 1,
"self": 1.6538535550407687,
"children": {
"TrainerController._reset_env": {
"total": 0.7132719609999185,
"count": 1,
"self": 0.7132719609999185
},
"TrainerController.advance": {
"total": 670.6192192529597,
"count": 232426,
"self": 1.5579137830409309,
"children": {
"env_step": {
"total": 512.6945005275957,
"count": 232426,
"self": 405.21575967513127,
"children": {
"SubprocessEnvManager._take_step": {
"total": 106.40502162860685,
"count": 232426,
"self": 6.200861928669838,
"children": {
"TorchPolicy.evaluate": {
"total": 100.20415969993701,
"count": 223050,
"self": 100.20415969993701
}
}
},
"workers": {
"total": 1.0737192238575517,
"count": 232426,
"self": 0.0,
"children": {
"worker_root": {
"total": 670.9738316960611,
"count": 232426,
"is_parallel": true,
"self": 355.8014379339638,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0004837969991058344,
"count": 1,
"is_parallel": true,
"self": 0.00011554899901966564,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00036824800008616876,
"count": 2,
"is_parallel": true,
"self": 0.00036824800008616876
}
}
},
"UnityEnvironment.step": {
"total": 0.008337327000845107,
"count": 1,
"is_parallel": true,
"self": 0.00011210599950572941,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00010669000039342791,
"count": 1,
"is_parallel": true,
"self": 0.00010669000039342791
},
"communicator.exchange": {
"total": 0.007929334000436938,
"count": 1,
"is_parallel": true,
"self": 0.007929334000436938
},
"steps_from_proto": {
"total": 0.00018919700050901156,
"count": 1,
"is_parallel": true,
"self": 4.6781999117229134e-05,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00014241500139178243,
"count": 2,
"is_parallel": true,
"self": 0.00014241500139178243
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 315.1723937620973,
"count": 232425,
"is_parallel": true,
"self": 9.822277168806977,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 17.08864757183983,
"count": 232425,
"is_parallel": true,
"self": 17.08864757183983
},
"communicator.exchange": {
"total": 265.86610765343903,
"count": 232425,
"is_parallel": true,
"self": 265.86610765343903
},
"steps_from_proto": {
"total": 22.395361368011436,
"count": 232425,
"is_parallel": true,
"self": 7.880983222832583,
"children": {
"_process_rank_one_or_two_observation": {
"total": 14.514378145178853,
"count": 464850,
"is_parallel": true,
"self": 14.514378145178853
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 156.36680494232314,
"count": 232426,
"self": 2.378727032453753,
"children": {
"process_trajectory": {
"total": 55.04814391588479,
"count": 232426,
"self": 54.66188722788502,
"children": {
"RLTrainer._checkpoint": {
"total": 0.38625668799977575,
"count": 10,
"self": 0.38625668799977575
}
}
},
"_update_policy": {
"total": 98.9399339939846,
"count": 97,
"self": 82.68051459603157,
"children": {
"TorchPPOOptimizer.update": {
"total": 16.259419397953025,
"count": 2910,
"self": 16.259419397953025
}
}
}
}
}
}
},
"trainer_threads": {
"total": 4.2099964048247784e-07,
"count": 1,
"self": 4.2099964048247784e-07
},
"TrainerController._save_models": {
"total": 0.03702603000056115,
"count": 1,
"self": 0.0006408610006474191,
"children": {
"RLTrainer._checkpoint": {
"total": 0.03638516899991373,
"count": 1,
"self": 0.03638516899991373
}
}
}
}
}
}
}