ppo-Huggy / run_logs / timers.json
{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4038923978805542,
"min": 1.4038923978805542,
"max": 1.430087924003601,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 70111.7890625,
"min": 68429.484375,
"max": 79294.96875,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 108.37855579868709,
"min": 85.31533101045297,
"max": 400.3888888888889,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49529.0,
"min": 48971.0,
"max": 50449.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999532.0,
"min": 49916.0,
"max": 1999532.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999532.0,
"min": 49916.0,
"max": 1999532.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.3512637615203857,
"min": 0.1344415694475174,
"max": 2.422147035598755,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1074.527587890625,
"min": 16.80519676208496,
"max": 1369.00244140625,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.609942781455668,
"min": 1.929640699148178,
"max": 3.8624280621759763,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1649.7438511252403,
"min": 241.20508739352226,
"max": 2167.038027346134,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.609942781455668,
"min": 1.929640699148178,
"max": 3.8624280621759763,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1649.7438511252403,
"min": 241.20508739352226,
"max": 2167.038027346134,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.014613893148230597,
"min": 0.0123652809173412,
"max": 0.02031189067573804,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.04384167944469179,
"min": 0.0247305618346824,
"max": 0.06093567202721412,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.04954227449165451,
"min": 0.020298454848428567,
"max": 0.06071181719501813,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.14862682347496353,
"min": 0.040596909696857135,
"max": 0.16950150852402052,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.3743488752500005e-06,
"min": 3.3743488752500005e-06,
"max": 0.0002953606515464499,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.0123046625750001e-05,
"min": 1.0123046625750001e-05,
"max": 0.0008443657685447498,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10112475,
"min": 0.10112475,
"max": 0.19845355,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30337425,
"min": 0.20740675000000003,
"max": 0.5814552500000001,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.6125025e-05,
"min": 6.6125025e-05,
"max": 0.004922832145,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.000198375075,
"min": 0.000198375075,
"max": 0.014074616975000002,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1676374250",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1676376744"
},
"total": 2494.47160133,
"count": 1,
"self": 0.4416952259998652,
"children": {
"run_training.setup": {
"total": 0.11873501699994904,
"count": 1,
"self": 0.11873501699994904
},
"TrainerController.start_learning": {
"total": 2493.911171087,
"count": 1,
"self": 4.510901194001235,
"children": {
"TrainerController._reset_env": {
"total": 10.589457094000068,
"count": 1,
"self": 10.589457094000068
},
"TrainerController.advance": {
"total": 2478.6935687019986,
"count": 231672,
"self": 4.5854351409384435,
"children": {
"env_step": {
"total": 1933.1873268450208,
"count": 231672,
"self": 1613.694657007066,
"children": {
"SubprocessEnvManager._take_step": {
"total": 316.5626814599675,
"count": 231672,
"self": 16.0297840849222,
"children": {
"TorchPolicy.evaluate": {
"total": 300.5328973750453,
"count": 222997,
"self": 74.63434062591978,
"children": {
"TorchPolicy.sample_actions": {
"total": 225.89855674912553,
"count": 222997,
"self": 225.89855674912553
}
}
}
}
},
"workers": {
"total": 2.929988377987229,
"count": 231672,
"self": 0.0,
"children": {
"worker_root": {
"total": 2485.037331834195,
"count": 231672,
"is_parallel": true,
"self": 1174.2031708331524,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005779372000006333,
"count": 1,
"is_parallel": true,
"self": 0.0003949890001422318,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.005384382999864101,
"count": 2,
"is_parallel": true,
"self": 0.005384382999864101
}
}
},
"UnityEnvironment.step": {
"total": 0.030352399999969748,
"count": 1,
"is_parallel": true,
"self": 0.0003016459999116705,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00023038200004066312,
"count": 1,
"is_parallel": true,
"self": 0.00023038200004066312
},
"communicator.exchange": {
"total": 0.028966486000058467,
"count": 1,
"is_parallel": true,
"self": 0.028966486000058467
},
"steps_from_proto": {
"total": 0.0008538859999589477,
"count": 1,
"is_parallel": true,
"self": 0.00025311300009889237,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0006007729998600553,
"count": 2,
"is_parallel": true,
"self": 0.0006007729998600553
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1310.8341610010425,
"count": 231671,
"is_parallel": true,
"self": 39.303181729871994,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 81.15272567310285,
"count": 231671,
"is_parallel": true,
"self": 81.15272567310285
},
"communicator.exchange": {
"total": 1095.0657135769857,
"count": 231671,
"is_parallel": true,
"self": 1095.0657135769857
},
"steps_from_proto": {
"total": 95.3125400210821,
"count": 231671,
"is_parallel": true,
"self": 38.75461774800931,
"children": {
"_process_rank_one_or_two_observation": {
"total": 56.55792227307279,
"count": 463342,
"is_parallel": true,
"self": 56.55792227307279
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 540.9208067160394,
"count": 231672,
"self": 7.22177492094454,
"children": {
"process_trajectory": {
"total": 164.285700441093,
"count": 231672,
"self": 162.88978879709407,
"children": {
"RLTrainer._checkpoint": {
"total": 1.395911643998943,
"count": 10,
"self": 1.395911643998943
}
}
},
"_update_policy": {
"total": 369.4133313540018,
"count": 97,
"self": 309.80879755399576,
"children": {
"TorchPPOOptimizer.update": {
"total": 59.60453380000604,
"count": 2910,
"self": 59.60453380000604
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.240002327715047e-07,
"count": 1,
"self": 9.240002327715047e-07
},
"TrainerController._save_models": {
"total": 0.11724317300013354,
"count": 1,
"self": 0.0021288030002324376,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1151143699999011,
"count": 1,
"self": 0.1151143699999011
}
}
}
}
}
}
}
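
For reference, a timers.json file like the one above can be inspected programmatically. The following is a minimal sketch (assuming the file is saved locally as timers.json; the path and print formatting are illustrative, not part of the ML-Agents API): it loads the file, reports the latest recorded mean cumulative reward from the gauges, and walks the nested "children" blocks to show where wall-clock time was spent.

import json

# Load the ML-Agents timer log (local path is an assumption for this sketch).
with open("timers.json") as f:
    timers = json.load(f)

# Gauges hold per-run summary statistics, e.g. the mean cumulative reward.
reward = timers["gauges"]["Huggy.Environment.CumulativeReward.mean"]
print(f"Mean cumulative reward: {reward['value']:.3f} "
      f"(min {reward['min']:.3f}, max {reward['max']:.3f}, {reward['count']} summaries)")

# The timer tree nests 'children' blocks; walk it to see where time was spent.
def walk(node, name="root", depth=0):
    print(f"{'  ' * depth}{name}: {node.get('total', 0.0):.1f}s "
          f"over {node.get('count', 0)} calls")
    for child_name, child in node.get("children", {}).items():
        walk(child, child_name, depth + 1)

walk(timers)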