ppo-Huggy / run_logs / timers.json
{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4080967903137207,
"min": 1.4080967903137207,
"max": 1.4315989017486572,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 70478.0625,
"min": 68681.390625,
"max": 78473.0234375,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 81.3338898163606,
"min": 78.84185303514377,
"max": 389.06201550387595,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 48719.0,
"min": 48719.0,
"max": 50189.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999992.0,
"min": 49972.0,
"max": 1999992.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999992.0,
"min": 49972.0,
"max": 1999992.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.4639127254486084,
"min": 0.10203772783279419,
"max": 2.495887041091919,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1475.8836669921875,
"min": 13.060829162597656,
"max": 1514.1387939453125,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.8660593362204816,
"min": 1.8529262313386425,
"max": 3.943551599262506,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2315.7695423960686,
"min": 237.17455761134624,
"max": 2370.562239050865,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.8660593362204816,
"min": 1.8529262313386425,
"max": 3.943551599262506,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2315.7695423960686,
"min": 237.17455761134624,
"max": 2370.562239050865,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.016409794742099217,
"min": 0.0130502052915593,
"max": 0.02091323134918639,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.04922938422629765,
"min": 0.0261004105831186,
"max": 0.05592828783846926,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.06048047327333026,
"min": 0.024398074702670178,
"max": 0.0610202041351133,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.1814414198199908,
"min": 0.048796149405340355,
"max": 0.18306061240533988,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.5459988180333347e-06,
"min": 3.5459988180333347e-06,
"max": 0.00029530852656382497,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.0637996454100004e-05,
"min": 1.0637996454100004e-05,
"max": 0.0008438907187030998,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10118196666666668,
"min": 0.10118196666666668,
"max": 0.198436175,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30354590000000004,
"min": 0.20749140000000008,
"max": 0.5812969000000001,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.898013666666669e-05,
"min": 6.898013666666669e-05,
"max": 0.004921965132500001,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.0002069404100000001,
"min": 0.0002069404100000001,
"max": 0.014066715309999998,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1702007192",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1702009652"
},
"total": 2460.2762546510003,
"count": 1,
"self": 0.4351730280004631,
"children": {
"run_training.setup": {
"total": 0.06586626599994361,
"count": 1,
"self": 0.06586626599994361
},
"TrainerController.start_learning": {
"total": 2459.775215357,
"count": 1,
"self": 4.711548127927017,
"children": {
"TrainerController._reset_env": {
"total": 3.489222027999972,
"count": 1,
"self": 3.489222027999972
},
"TrainerController.advance": {
"total": 2451.4695461760734,
"count": 232770,
"self": 4.882692254195717,
"children": {
"env_step": {
"total": 1928.0591666888868,
"count": 232770,
"self": 1595.3475184069434,
"children": {
"SubprocessEnvManager._take_step": {
"total": 329.6881135459739,
"count": 232770,
"self": 17.244513490849386,
"children": {
"TorchPolicy.evaluate": {
"total": 312.44360005512453,
"count": 222956,
"self": 312.44360005512453
}
}
},
"workers": {
"total": 3.023534735969406,
"count": 232770,
"self": 0.0,
"children": {
"worker_root": {
"total": 2451.9027331899815,
"count": 232770,
"is_parallel": true,
"self": 1161.8875506720324,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0008509300000696385,
"count": 1,
"is_parallel": true,
"self": 0.00024012700009734544,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.000610802999972293,
"count": 2,
"is_parallel": true,
"self": 0.000610802999972293
}
}
},
"UnityEnvironment.step": {
"total": 0.02915978799990171,
"count": 1,
"is_parallel": true,
"self": 0.0002911450000055993,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00019878000000517204,
"count": 1,
"is_parallel": true,
"self": 0.00019878000000517204
},
"communicator.exchange": {
"total": 0.0279724139999189,
"count": 1,
"is_parallel": true,
"self": 0.0279724139999189
},
"steps_from_proto": {
"total": 0.000697448999972039,
"count": 1,
"is_parallel": true,
"self": 0.00018394300002455566,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005135059999474834,
"count": 2,
"is_parallel": true,
"self": 0.0005135059999474834
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1290.0151825179491,
"count": 232769,
"is_parallel": true,
"self": 38.9599788866949,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 82.79283818702766,
"count": 232769,
"is_parallel": true,
"self": 82.79283818702766
},
"communicator.exchange": {
"total": 1078.1808811160981,
"count": 232769,
"is_parallel": true,
"self": 1078.1808811160981
},
"steps_from_proto": {
"total": 90.08148432812857,
"count": 232769,
"is_parallel": true,
"self": 31.360803417193893,
"children": {
"_process_rank_one_or_two_observation": {
"total": 58.720680910934675,
"count": 465538,
"is_parallel": true,
"self": 58.720680910934675
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 518.527687232991,
"count": 232770,
"self": 7.254703589033625,
"children": {
"process_trajectory": {
"total": 161.53409576095976,
"count": 232770,
"self": 160.3719590139591,
"children": {
"RLTrainer._checkpoint": {
"total": 1.162136747000659,
"count": 10,
"self": 1.162136747000659
}
}
},
"_update_policy": {
"total": 349.7388878829976,
"count": 97,
"self": 284.9708006319919,
"children": {
"TorchPPOOptimizer.update": {
"total": 64.76808725100568,
"count": 2910,
"self": 64.76808725100568
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.879997039912269e-07,
"count": 1,
"self": 8.879997039912269e-07
},
"TrainerController._save_models": {
"total": 0.10489813699996375,
"count": 1,
"self": 0.001870050999968953,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1030280859999948,
"count": 1,
"self": 0.1030280859999948
}
}
}
}
}
}
}
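
A minimal sketch (not part of the original file) of how the timers file above could be inspected: it loads the JSON with the standard library and prints each gauge's summary plus the root timer's total wall-clock time. The path "run_logs/timers.json" is assumed from the page header; only keys visible in the file ("gauges", "value", "min", "max", "count", "total") are used.

import json

# Load the ML-Agents timer/gauge dump produced during training.
with open("run_logs/timers.json") as f:
    timers = json.load(f)

# Each gauge records the most recent value plus min/max/count over the run.
for name, gauge in timers["gauges"].items():
    print(
        f"{name}: value={gauge['value']:.4f} "
        f"min={gauge['min']:.4f} max={gauge['max']:.4f} count={gauge['count']}"
    )

# Total wall-clock time spent in the root timer, in seconds.
print(f"total: {timers['total']:.1f} s")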