ppo-Huggy / run_logs / timers.json
{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4082794189453125,
"min": 1.4082794189453125,
"max": 1.4279576539993286,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 69778.8359375,
"min": 68273.9140625,
"max": 77411.3203125,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 76.06163328197226,
"min": 76.06163328197226,
"max": 389.625,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49364.0,
"min": 48880.0,
"max": 50045.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999928.0,
"min": 49359.0,
"max": 1999928.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999928.0,
"min": 49359.0,
"max": 1999928.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.4198222160339355,
"min": 0.22671033442020416,
"max": 2.4931559562683105,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1570.464599609375,
"min": 28.792211532592773,
"max": 1583.34033203125,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.7236497296574305,
"min": 1.6590480396128076,
"max": 3.957928076237935,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2416.6486745476723,
"min": 210.69910103082657,
"max": 2481.089023590088,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.7236497296574305,
"min": 1.6590480396128076,
"max": 3.957928076237935,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2416.6486745476723,
"min": 210.69910103082657,
"max": 2481.089023590088,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.01824047023934933,
"min": 0.013599547921330668,
"max": 0.01990341748524871,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.054721410718047994,
"min": 0.027199095842661336,
"max": 0.05971025245574613,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.06473746101061502,
"min": 0.020093135597805182,
"max": 0.06566971863309543,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.19421238303184507,
"min": 0.040186271195610364,
"max": 0.19421238303184507,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.4824988391999957e-06,
"min": 3.4824988391999957e-06,
"max": 0.00029530852656382497,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.0447496517599988e-05,
"min": 1.0447496517599988e-05,
"max": 0.00084410326863225,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10116080000000001,
"min": 0.10116080000000001,
"max": 0.198436175,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30348240000000004,
"min": 0.20745405,
"max": 0.5813677500000001,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.792391999999995e-05,
"min": 6.792391999999995e-05,
"max": 0.004921965132499999,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00020377175999999986,
"min": 0.00020377175999999986,
"max": 0.014070250725,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1679447368",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1679449678"
},
"total": 2310.185081147,
"count": 1,
"self": 0.442803437999828,
"children": {
"run_training.setup": {
"total": 0.10590352700000949,
"count": 1,
"self": 0.10590352700000949
},
"TrainerController.start_learning": {
"total": 2309.636374182,
"count": 1,
"self": 4.284643370973299,
"children": {
"TrainerController._reset_env": {
"total": 9.748138215999916,
"count": 1,
"self": 9.748138215999916
},
"TrainerController.advance": {
"total": 2295.492896643027,
"count": 232428,
"self": 4.434772680197057,
"children": {
"env_step": {
"total": 1780.9908391709046,
"count": 232428,
"self": 1506.6792819150537,
"children": {
"SubprocessEnvManager._take_step": {
"total": 271.52740860888105,
"count": 232428,
"self": 16.80435331085812,
"children": {
"TorchPolicy.evaluate": {
"total": 254.72305529802293,
"count": 222892,
"self": 254.72305529802293
}
}
},
"workers": {
"total": 2.784148646969925,
"count": 232428,
"self": 0.0,
"children": {
"worker_root": {
"total": 2301.7891625379425,
"count": 232428,
"is_parallel": true,
"self": 1075.5513798840004,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0009503290000338893,
"count": 1,
"is_parallel": true,
"self": 0.00027454199994281225,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0006757870000910771,
"count": 2,
"is_parallel": true,
"self": 0.0006757870000910771
}
}
},
"UnityEnvironment.step": {
"total": 0.045799051000017243,
"count": 1,
"is_parallel": true,
"self": 0.0003316669999549049,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0002309120000063558,
"count": 1,
"is_parallel": true,
"self": 0.0002309120000063558
},
"communicator.exchange": {
"total": 0.044530980999979874,
"count": 1,
"is_parallel": true,
"self": 0.044530980999979874
},
"steps_from_proto": {
"total": 0.0007054910000761083,
"count": 1,
"is_parallel": true,
"self": 0.00021572700006800005,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0004897640000081083,
"count": 2,
"is_parallel": true,
"self": 0.0004897640000081083
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1226.237782653942,
"count": 232427,
"is_parallel": true,
"self": 37.74534997795331,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 76.0536024000121,
"count": 232427,
"is_parallel": true,
"self": 76.0536024000121
},
"communicator.exchange": {
"total": 1023.6912337200307,
"count": 232427,
"is_parallel": true,
"self": 1023.6912337200307
},
"steps_from_proto": {
"total": 88.74759655594619,
"count": 232427,
"is_parallel": true,
"self": 33.62102251388069,
"children": {
"_process_rank_one_or_two_observation": {
"total": 55.1265740420655,
"count": 464854,
"is_parallel": true,
"self": 55.1265740420655
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 510.0672847919253,
"count": 232428,
"self": 6.542161388900126,
"children": {
"process_trajectory": {
"total": 141.31590547702592,
"count": 232428,
"self": 140.0061662480265,
"children": {
"RLTrainer._checkpoint": {
"total": 1.3097392289994332,
"count": 10,
"self": 1.3097392289994332
}
}
},
"_update_policy": {
"total": 362.20921792599927,
"count": 97,
"self": 304.822518561995,
"children": {
"TorchPPOOptimizer.update": {
"total": 57.386699364004244,
"count": 2910,
"self": 57.386699364004244
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.2519999472715426e-06,
"count": 1,
"self": 1.2519999472715426e-06
},
"TrainerController._save_models": {
"total": 0.11069469999983994,
"count": 1,
"self": 0.002138234000085504,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10855646599975444,
"count": 1,
"self": 0.10855646599975444
}
}
}
}
}
}
}