{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.405651569366455,
"min": 1.405651569366455,
"max": 1.4265307188034058,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 71147.0546875,
"min": 68244.5859375,
"max": 78337.2265625,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 88.78456014362656,
"min": 88.78456014362656,
"max": 389.48062015503876,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49453.0,
"min": 48869.0,
"max": 50243.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999925.0,
"min": 49720.0,
"max": 1999925.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999925.0,
"min": 49720.0,
"max": 1999925.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.3665900230407715,
"min": 0.01615036278963089,
"max": 2.427845001220703,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1318.190673828125,
"min": 2.067246437072754,
"max": 1318.190673828125,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.6632659712428466,
"min": 1.7063825273653492,
"max": 3.7978423082508805,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2040.4391459822655,
"min": 218.4169635027647,
"max": 2040.4391459822655,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.6632659712428466,
"min": 1.7063825273653492,
"max": 3.7978423082508805,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2040.4391459822655,
"min": 218.4169635027647,
"max": 2040.4391459822655,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.014705885915706556,
"min": 0.013440026093545991,
"max": 0.019038749972403617,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.02941177183141311,
"min": 0.026880052187091982,
"max": 0.05711624991721085,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05202161855995655,
"min": 0.022894639242440465,
"max": 0.06871160926918189,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.1040432371199131,
"min": 0.04578927848488093,
"max": 0.19097600169479845,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 4.528523490524996e-06,
"min": 4.528523490524996e-06,
"max": 0.00029527590157469994,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 9.057046981049993e-06,
"min": 9.057046981049993e-06,
"max": 0.0008437944187351999,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10150947499999996,
"min": 0.10150947499999996,
"max": 0.19842529999999997,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.20301894999999992,
"min": 0.20301894999999992,
"max": 0.5812648,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 8.532280249999994e-05,
"min": 8.532280249999994e-05,
"max": 0.00492142247,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.0001706456049999999,
"min": 0.0001706456049999999,
"max": 0.01406511352,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1670861490",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1670863644"
},
"total": 2153.923043034,
"count": 1,
"self": 0.3899735210002291,
"children": {
"run_training.setup": {
"total": 0.1069794790000742,
"count": 1,
"self": 0.1069794790000742
},
"TrainerController.start_learning": {
"total": 2153.4260900339996,
"count": 1,
"self": 3.7048133019311535,
"children": {
"TrainerController._reset_env": {
"total": 11.208578579000005,
"count": 1,
"self": 11.208578579000005
},
"TrainerController.advance": {
"total": 2138.404176518068,
"count": 231322,
"self": 4.010063808010273,
"children": {
"env_step": {
"total": 1680.58411943509,
"count": 231322,
"self": 1413.2548568350758,
"children": {
"SubprocessEnvManager._take_step": {
"total": 264.811525737922,
"count": 231322,
"self": 13.85309673191864,
"children": {
"TorchPolicy.evaluate": {
"total": 250.95842900600337,
"count": 222910,
"self": 63.527597162866186,
"children": {
"TorchPolicy.sample_actions": {
"total": 187.43083184313718,
"count": 222910,
"self": 187.43083184313718
}
}
}
}
},
"workers": {
"total": 2.517736862092306,
"count": 231322,
"self": 0.0,
"children": {
"worker_root": {
"total": 2145.525042555109,
"count": 231322,
"is_parallel": true,
"self": 982.8763971502126,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0020058969998899556,
"count": 1,
"is_parallel": true,
"self": 0.00029291000009834534,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0017129869997916103,
"count": 2,
"is_parallel": true,
"self": 0.0017129869997916103
}
}
},
"UnityEnvironment.step": {
"total": 0.027173313000048438,
"count": 1,
"is_parallel": true,
"self": 0.00026347600010012684,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00020456299989746185,
"count": 1,
"is_parallel": true,
"self": 0.00020456299989746185
},
"communicator.exchange": {
"total": 0.02575006699998994,
"count": 1,
"is_parallel": true,
"self": 0.02575006699998994
},
"steps_from_proto": {
"total": 0.0009552070000609092,
"count": 1,
"is_parallel": true,
"self": 0.0002766820000488224,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0006785250000120868,
"count": 2,
"is_parallel": true,
"self": 0.0006785250000120868
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1162.6486454048963,
"count": 231321,
"is_parallel": true,
"self": 34.39001131182772,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 74.42579087292665,
"count": 231321,
"is_parallel": true,
"self": 74.42579087292665
},
"communicator.exchange": {
"total": 962.3054416501334,
"count": 231321,
"is_parallel": true,
"self": 962.3054416501334
},
"steps_from_proto": {
"total": 91.52740157000858,
"count": 231321,
"is_parallel": true,
"self": 37.49607249120504,
"children": {
"_process_rank_one_or_two_observation": {
"total": 54.03132907880354,
"count": 462642,
"is_parallel": true,
"self": 54.03132907880354
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 453.80999327496784,
"count": 231322,
"self": 5.794620224915207,
"children": {
"process_trajectory": {
"total": 139.94325552605324,
"count": 231322,
"self": 139.4778701360533,
"children": {
"RLTrainer._checkpoint": {
"total": 0.4653853899999376,
"count": 4,
"self": 0.4653853899999376
}
}
},
"_update_policy": {
"total": 308.0721175239994,
"count": 96,
"self": 255.29030734399817,
"children": {
"TorchPPOOptimizer.update": {
"total": 52.78181018000123,
"count": 2880,
"self": 52.78181018000123
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.58000327955233e-07,
"count": 1,
"self": 9.58000327955233e-07
},
"TrainerController._save_models": {
"total": 0.10852067699988766,
"count": 1,
"self": 0.0019262840000919823,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10659439299979567,
"count": 1,
"self": 0.10659439299979567
}
}
}
}
}
}
}