ppo-Huggy/run_logs/timers.json
{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.406569004058838,
"min": 1.406569004058838,
"max": 1.427618145942688,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 71715.328125,
"min": 68864.4375,
"max": 77707.6953125,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 92.41231343283582,
"min": 86.75438596491227,
"max": 428.78632478632477,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49533.0,
"min": 48979.0,
"max": 50168.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999973.0,
"min": 49837.0,
"max": 1999973.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999973.0,
"min": 49837.0,
"max": 1999973.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.3921518325805664,
"min": 0.1512616127729416,
"max": 2.462287664413452,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1282.193359375,
"min": 17.54634666442871,
"max": 1378.32470703125,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.6785958466022763,
"min": 2.0059638589877506,
"max": 3.9367252754216846,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1971.72737377882,
"min": 232.69180764257908,
"max": 2133.829577922821,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.6785958466022763,
"min": 2.0059638589877506,
"max": 3.9367252754216846,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1971.72737377882,
"min": 232.69180764257908,
"max": 2133.829577922821,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.01909476819904133,
"min": 0.012740730841889874,
"max": 0.01909476819904133,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.03818953639808266,
"min": 0.028926871768635464,
"max": 0.056147712310000014,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.04922821292032798,
"min": 0.022720389471699794,
"max": 0.0579335410768787,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.09845642584065596,
"min": 0.04544077894339959,
"max": 0.16571958797673383,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 4.371848542749996e-06,
"min": 4.371848542749996e-06,
"max": 0.0002953609515463499,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 8.743697085499993e-06,
"min": 8.743697085499993e-06,
"max": 0.0008442738185753997,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10145725000000003,
"min": 0.10145725000000003,
"max": 0.19845364999999998,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.20291450000000005,
"min": 0.20291450000000005,
"max": 0.5814246000000001,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 8.271677499999992e-05,
"min": 8.271677499999992e-05,
"max": 0.004922837135000001,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00016543354999999985,
"min": 0.00016543354999999985,
"max": 0.014073087540000003,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1705549869",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.2+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1705552591"
},
"total": 2722.373560876,
"count": 1,
"self": 0.49381376299970725,
"children": {
"run_training.setup": {
"total": 0.05871258000001944,
"count": 1,
"self": 0.05871258000001944
},
"TrainerController.start_learning": {
"total": 2721.8210345330003,
"count": 1,
"self": 4.926119791999099,
"children": {
"TrainerController._reset_env": {
"total": 3.695542660000001,
"count": 1,
"self": 3.695542660000001
},
"TrainerController.advance": {
"total": 2713.0211404110023,
"count": 232209,
"self": 5.595609872822479,
"children": {
"env_step": {
"total": 2189.125660520057,
"count": 232209,
"self": 1817.1817394490454,
"children": {
"SubprocessEnvManager._take_step": {
"total": 368.4612485891146,
"count": 232209,
"self": 18.838118044058774,
"children": {
"TorchPolicy.evaluate": {
"total": 349.6231305450558,
"count": 223019,
"self": 349.6231305450558
}
}
},
"workers": {
"total": 3.482672481896998,
"count": 232209,
"self": 0.0,
"children": {
"worker_root": {
"total": 2713.51603951911,
"count": 232209,
"is_parallel": true,
"self": 1233.6050377859094,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0007525729999997566,
"count": 1,
"is_parallel": true,
"self": 0.00021119900020494242,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005413739997948142,
"count": 2,
"is_parallel": true,
"self": 0.0005413739997948142
}
}
},
"UnityEnvironment.step": {
"total": 0.03397884200012413,
"count": 1,
"is_parallel": true,
"self": 0.00042436799981260265,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00021611600004689535,
"count": 1,
"is_parallel": true,
"self": 0.00021611600004689535
},
"communicator.exchange": {
"total": 0.03249682200021198,
"count": 1,
"is_parallel": true,
"self": 0.03249682200021198
},
"steps_from_proto": {
"total": 0.0008415360000526562,
"count": 1,
"is_parallel": true,
"self": 0.00024255000016637496,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005989859998862812,
"count": 2,
"is_parallel": true,
"self": 0.0005989859998862812
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1479.9110017332007,
"count": 232208,
"is_parallel": true,
"self": 45.975332645148,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 100.60146812395647,
"count": 232208,
"is_parallel": true,
"self": 100.60146812395647
},
"communicator.exchange": {
"total": 1229.8799282821205,
"count": 232208,
"is_parallel": true,
"self": 1229.8799282821205
},
"steps_from_proto": {
"total": 103.45427268197568,
"count": 232208,
"is_parallel": true,
"self": 39.236931534034284,
"children": {
"_process_rank_one_or_two_observation": {
"total": 64.2173411479414,
"count": 464416,
"is_parallel": true,
"self": 64.2173411479414
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 518.2998700181229,
"count": 232209,
"self": 7.496407599219083,
"children": {
"process_trajectory": {
"total": 175.6741410519037,
"count": 232209,
"self": 174.14459116090552,
"children": {
"RLTrainer._checkpoint": {
"total": 1.5295498909981688,
"count": 10,
"self": 1.5295498909981688
}
}
},
"_update_policy": {
"total": 335.1293213670001,
"count": 96,
"self": 270.12970202100496,
"children": {
"TorchPPOOptimizer.update": {
"total": 64.99961934599514,
"count": 2880,
"self": 64.99961934599514
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.414999132975936e-06,
"count": 1,
"self": 1.414999132975936e-06
},
"TrainerController._save_models": {
"total": 0.17823025500001677,
"count": 1,
"self": 0.002171968999391538,
"children": {
"RLTrainer._checkpoint": {
"total": 0.17605828600062523,
"count": 1,
"self": 0.17605828600062523
}
}
}
}
}
}
}
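
Below is a minimal, illustrative Python sketch (not part of the original run logs) showing one way to load and summarize a timers.json file like the one above: it prints each gauge's final/min/max values and walks the nested timer tree. The relative file path and the printed format are assumptions; the keys it reads ("gauges", "value"/"min"/"max"/"count", and "total"/"count"/"children") are taken from the JSON structure shown here.

import json

def print_timer_tree(node, name="root", depth=0):
    # Each timer block in this file has "total" (seconds), "count", "self",
    # and optionally a nested "children" dict of sub-timers.
    print(f"{'  ' * depth}{name}: total={node.get('total', 0.0):.3f}s count={node.get('count', 0)}")
    for child_name, child in node.get("children", {}).items():
        print_timer_tree(child, child_name, depth + 1)

# Path is an assumption; adjust to wherever the file lives locally.
with open("run_logs/timers.json") as f:
    data = json.load(f)

# Gauges record the last value plus min/max over the run's summary points.
for gauge_name, stats in data["gauges"].items():
    print(f"{gauge_name}: value={stats['value']:.4f} "
          f"(min={stats['min']:.4f}, max={stats['max']:.4f}, n={stats['count']})")

# Wall-clock breakdown of where training time went, starting from the root timer.
print_timer_tree(data)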