{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4061120748519897,
"min": 1.4061120748519897,
"max": 1.4267741441726685,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 69362.1015625,
"min": 69213.9765625,
"max": 76452.046875,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 79.46141479099678,
"min": 77.165625,
"max": 424.35593220338984,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49425.0,
"min": 48748.0,
"max": 50074.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999982.0,
"min": 49676.0,
"max": 1999982.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999982.0,
"min": 49676.0,
"max": 1999982.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.5065789222717285,
"min": 0.08302275836467743,
"max": 2.510711193084717,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1559.0921630859375,
"min": 9.713663101196289,
"max": 1587.3271484375,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.9222897170440945,
"min": 1.802818043109698,
"max": 3.964072601352301,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2439.6642040014267,
"min": 210.9297110438347,
"max": 2479.9157714247704,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.9222897170440945,
"min": 1.802818043109698,
"max": 3.964072601352301,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2439.6642040014267,
"min": 210.9297110438347,
"max": 2479.9157714247704,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.019167484158404278,
"min": 0.01247369193721776,
"max": 0.02074572205892764,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.05750245247521283,
"min": 0.02494738387443552,
"max": 0.059582450540134835,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.0588381106654803,
"min": 0.022056519395361344,
"max": 0.06230752586076657,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.1765143319964409,
"min": 0.04411303879072269,
"max": 0.17768287907044092,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.634948788383333e-06,
"min": 3.634948788383333e-06,
"max": 0.00029532457655847503,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.0904846365149998e-05,
"min": 1.0904846365149998e-05,
"max": 0.0008440908186364,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10121161666666668,
"min": 0.10121161666666668,
"max": 0.19844152499999995,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30363485000000007,
"min": 0.20754930000000005,
"max": 0.5813636000000001,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 7.045967166666667e-05,
"min": 7.045967166666667e-05,
"max": 0.004922232097499999,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.000211379015,
"min": 0.000211379015,
"max": 0.014070043640000002,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1675315515",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1675318403"
},
"total": 2887.7907828290004,
"count": 1,
"self": 0.4409691900004873,
"children": {
"run_training.setup": {
"total": 0.1174508910000327,
"count": 1,
"self": 0.1174508910000327
},
"TrainerController.start_learning": {
"total": 2887.232362748,
"count": 1,
"self": 6.3625030459170375,
"children": {
"TrainerController._reset_env": {
"total": 10.262452608000046,
"count": 1,
"self": 10.262452608000046
},
"TrainerController.advance": {
"total": 2870.474911503083,
"count": 232476,
"self": 6.224148381963914,
"children": {
"env_step": {
"total": 2323.775656812009,
"count": 232476,
"self": 1943.5815300849797,
"children": {
"SubprocessEnvManager._take_step": {
"total": 376.20036624307437,
"count": 232476,
"self": 20.260602902092103,
"children": {
"TorchPolicy.evaluate": {
"total": 355.93976334098227,
"count": 222858,
"self": 89.30371098685686,
"children": {
"TorchPolicy.sample_actions": {
"total": 266.6360523541254,
"count": 222858,
"self": 266.6360523541254
}
}
}
}
},
"workers": {
"total": 3.9937604839548158,
"count": 232476,
"self": 0.0,
"children": {
"worker_root": {
"total": 2875.904120629075,
"count": 232476,
"is_parallel": true,
"self": 1293.6364119880031,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0020708069999955114,
"count": 1,
"is_parallel": true,
"self": 0.0004047340000852273,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016660729999102841,
"count": 2,
"is_parallel": true,
"self": 0.0016660729999102841
}
}
},
"UnityEnvironment.step": {
"total": 0.035881140000014966,
"count": 1,
"is_parallel": true,
"self": 0.0003181240000458274,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00020954799998662565,
"count": 1,
"is_parallel": true,
"self": 0.00020954799998662565
},
"communicator.exchange": {
"total": 0.03453699599998572,
"count": 1,
"is_parallel": true,
"self": 0.03453699599998572
},
"steps_from_proto": {
"total": 0.0008164719999967929,
"count": 1,
"is_parallel": true,
"self": 0.0002813099999343649,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.000535162000062428,
"count": 2,
"is_parallel": true,
"self": 0.000535162000062428
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1582.2677086410717,
"count": 232475,
"is_parallel": true,
"self": 44.24794475801218,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 100.12431639401234,
"count": 232475,
"is_parallel": true,
"self": 100.12431639401234
},
"communicator.exchange": {
"total": 1316.196815887007,
"count": 232475,
"is_parallel": true,
"self": 1316.196815887007
},
"steps_from_proto": {
"total": 121.69863160204017,
"count": 232475,
"is_parallel": true,
"self": 51.604827990106,
"children": {
"_process_rank_one_or_two_observation": {
"total": 70.09380361193416,
"count": 464950,
"is_parallel": true,
"self": 70.09380361193416
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 540.4751063091101,
"count": 232476,
"self": 9.76223327506841,
"children": {
"process_trajectory": {
"total": 182.97354665004133,
"count": 232476,
"self": 181.71324003104183,
"children": {
"RLTrainer._checkpoint": {
"total": 1.2603066189994934,
"count": 10,
"self": 1.2603066189994934
}
}
},
"_update_policy": {
"total": 347.7393263840004,
"count": 97,
"self": 290.22812849799794,
"children": {
"TorchPPOOptimizer.update": {
"total": 57.511197886002435,
"count": 2910,
"self": 57.511197886002435
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.2149998838140164e-06,
"count": 1,
"self": 1.2149998838140164e-06
},
"TrainerController._save_models": {
"total": 0.13249437600006786,
"count": 1,
"self": 0.002199364000261994,
"children": {
"RLTrainer._checkpoint": {
"total": 0.13029501199980587,
"count": 1,
"self": 0.13029501199980587
}
}
}
}
}
}
}