ppo-Huggy / run_logs / timers.json
{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4015657901763916,
"min": 1.4015657901763916,
"max": 1.424828052520752,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 68874.34375,
"min": 67997.265625,
"max": 78310.984375,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 119.18028846153847,
"min": 95.75241779497098,
"max": 404.312,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49579.0,
"min": 48937.0,
"max": 50539.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999988.0,
"min": 49991.0,
"max": 1999988.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999988.0,
"min": 49991.0,
"max": 1999988.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.3247976303100586,
"min": 0.07618013769388199,
"max": 2.4104013442993164,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 967.1158447265625,
"min": 9.44633674621582,
"max": 1246.177490234375,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.5613559629195013,
"min": 1.9056676698307837,
"max": 3.8290140249512414,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1481.5240805745125,
"min": 236.30279105901718,
"max": 1979.6002508997917,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.5613559629195013,
"min": 1.9056676698307837,
"max": 3.8290140249512414,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1481.5240805745125,
"min": 236.30279105901718,
"max": 1979.6002508997917,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.01470376746316712,
"min": 0.01397692685010649,
"max": 0.021061351063932914,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.04411130238950136,
"min": 0.02795385370021298,
"max": 0.05218311695110363,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.045500356083114944,
"min": 0.022357695156501402,
"max": 0.059711680975225244,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.13650106824934483,
"min": 0.04481953624635935,
"max": 0.17913504292567572,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.1160989613333295e-06,
"min": 3.1160989613333295e-06,
"max": 0.00029536747654417494,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 9.348296883999988e-06,
"min": 9.348296883999988e-06,
"max": 0.0008441530686156499,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10103866666666668,
"min": 0.10103866666666668,
"max": 0.19845582499999997,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30311600000000005,
"min": 0.20725235000000003,
"max": 0.58138435,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.182946666666661e-05,
"min": 6.182946666666661e-05,
"max": 0.0049229456674999995,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00018548839999999982,
"min": 0.00018548839999999982,
"max": 0.014071079064999999,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1676735327",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1676737636"
},
"total": 2308.8120527799997,
"count": 1,
"self": 0.41602737999937744,
"children": {
"run_training.setup": {
"total": 0.1175770410000041,
"count": 1,
"self": 0.1175770410000041
},
"TrainerController.start_learning": {
"total": 2308.2784483590003,
"count": 1,
"self": 4.012932407040353,
"children": {
"TrainerController._reset_env": {
"total": 11.091961132000023,
"count": 1,
"self": 11.091961132000023
},
"TrainerController.advance": {
"total": 2293.0602806899597,
"count": 230999,
"self": 4.4500722149814465,
"children": {
"env_step": {
"total": 1788.668125187943,
"count": 230999,
"self": 1491.6165483918985,
"children": {
"SubprocessEnvManager._take_step": {
"total": 294.4167959131164,
"count": 230999,
"self": 15.140939468138015,
"children": {
"TorchPolicy.evaluate": {
"total": 279.2758564449784,
"count": 222920,
"self": 69.0899712040059,
"children": {
"TorchPolicy.sample_actions": {
"total": 210.18588524097248,
"count": 222920,
"self": 210.18588524097248
}
}
}
}
},
"workers": {
"total": 2.634780882928112,
"count": 230999,
"self": 0.0,
"children": {
"worker_root": {
"total": 2300.1258520599595,
"count": 230999,
"is_parallel": true,
"self": 1087.3941067919282,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.004979966999997032,
"count": 1,
"is_parallel": true,
"self": 0.0005496979999861651,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.004430269000010867,
"count": 2,
"is_parallel": true,
"self": 0.004430269000010867
}
}
},
"UnityEnvironment.step": {
"total": 0.029401465999995935,
"count": 1,
"is_parallel": true,
"self": 0.0002894279999736682,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0002139880000129324,
"count": 1,
"is_parallel": true,
"self": 0.0002139880000129324
},
"communicator.exchange": {
"total": 0.028057265999990477,
"count": 1,
"is_parallel": true,
"self": 0.028057265999990477
},
"steps_from_proto": {
"total": 0.0008407840000188571,
"count": 1,
"is_parallel": true,
"self": 0.00039906599999994796,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00044171800001890915,
"count": 2,
"is_parallel": true,
"self": 0.00044171800001890915
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1212.7317452680313,
"count": 230998,
"is_parallel": true,
"self": 37.29400020501248,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 77.39211793503645,
"count": 230998,
"is_parallel": true,
"self": 77.39211793503645
},
"communicator.exchange": {
"total": 1008.6424360199662,
"count": 230998,
"is_parallel": true,
"self": 1008.6424360199662
},
"steps_from_proto": {
"total": 89.40319110801633,
"count": 230998,
"is_parallel": true,
"self": 36.27716216303568,
"children": {
"_process_rank_one_or_two_observation": {
"total": 53.126028944980646,
"count": 461996,
"is_parallel": true,
"self": 53.126028944980646
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 499.94208328703553,
"count": 230999,
"self": 6.774577708016295,
"children": {
"process_trajectory": {
"total": 152.153562637019,
"count": 230999,
"self": 150.84441826501902,
"children": {
"RLTrainer._checkpoint": {
"total": 1.3091443719999916,
"count": 10,
"self": 1.3091443719999916
}
}
},
"_update_policy": {
"total": 341.01394294200026,
"count": 97,
"self": 284.2018630430045,
"children": {
"TorchPPOOptimizer.update": {
"total": 56.81207989899576,
"count": 2910,
"self": 56.81207989899576
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.410002010350581e-07,
"count": 1,
"self": 8.410002010350581e-07
},
"TrainerController._save_models": {
"total": 0.1132732890000625,
"count": 1,
"self": 0.0022223910000320757,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11105089800003043,
"count": 1,
"self": 0.11105089800003043
}
}
}
}
}
}
}