{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.400299072265625,
"min": 1.400299072265625,
"max": 1.4274228811264038,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 69986.9453125,
"min": 68474.5234375,
"max": 78513.921875,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 86.40034965034965,
"min": 73.76347305389221,
"max": 404.26612903225805,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49421.0,
"min": 48724.0,
"max": 50129.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999980.0,
"min": 49895.0,
"max": 1999980.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999980.0,
"min": 49895.0,
"max": 1999980.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.4926910400390625,
"min": 0.1457328200340271,
"max": 2.506725549697876,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1425.8192138671875,
"min": 17.92513656616211,
"max": 1632.32763671875,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.846360136162151,
"min": 1.7908403829830448,
"max": 4.018891125725162,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2200.1179978847504,
"min": 220.27336710691452,
"max": 2633.8355873823166,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.846360136162151,
"min": 1.7908403829830448,
"max": 4.018891125725162,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2200.1179978847504,
"min": 220.27336710691452,
"max": 2633.8355873823166,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.015821519006779756,
"min": 0.012488718456976737,
"max": 0.0205345105927942,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.04746455702033927,
"min": 0.024977436913953474,
"max": 0.058720966811718726,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.058217582354942954,
"min": 0.02047190396115184,
"max": 0.06092919040885236,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.17465274706482886,
"min": 0.04094380792230368,
"max": 0.1827875712265571,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.8421487193166635e-06,
"min": 3.8421487193166635e-06,
"max": 0.00029530747656417503,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.152644615794999e-05,
"min": 1.152644615794999e-05,
"max": 0.0008442117185960999,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10128068333333334,
"min": 0.10128068333333334,
"max": 0.19843582499999995,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30384205000000003,
"min": 0.20769769999999993,
"max": 0.5814039,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 7.390609833333329e-05,
"min": 7.390609833333329e-05,
"max": 0.004921947667499999,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00022171829499999989,
"min": 0.00022171829499999989,
"max": 0.014072054609999999,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1672036747",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1672038996"
},
"total": 2248.8344034300003,
"count": 1,
"self": 0.45002841100040314,
"children": {
"run_training.setup": {
"total": 0.11391925300006278,
"count": 1,
"self": 0.11391925300006278
},
"TrainerController.start_learning": {
"total": 2248.270455766,
"count": 1,
"self": 3.8074966100803067,
"children": {
"TrainerController._reset_env": {
"total": 9.545143664999955,
"count": 1,
"self": 9.545143664999955
},
"TrainerController.advance": {
"total": 2234.80112889892,
"count": 233215,
"self": 3.914636908841203,
"children": {
"env_step": {
"total": 1749.9959939420407,
"count": 233215,
"self": 1471.2259382900697,
"children": {
"SubprocessEnvManager._take_step": {
"total": 276.2135089940499,
"count": 233215,
"self": 14.070031714941024,
"children": {
"TorchPolicy.evaluate": {
"total": 262.1434772791089,
"count": 222964,
"self": 65.42770486709765,
"children": {
"TorchPolicy.sample_actions": {
"total": 196.71577241201123,
"count": 222964,
"self": 196.71577241201123
}
}
}
}
},
"workers": {
"total": 2.5565466579211034,
"count": 233215,
"self": 0.0,
"children": {
"worker_root": {
"total": 2240.70013061396,
"count": 233215,
"is_parallel": true,
"self": 1027.336393327867,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0023734279999416685,
"count": 1,
"is_parallel": true,
"self": 0.0003770519998624877,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001996376000079181,
"count": 2,
"is_parallel": true,
"self": 0.001996376000079181
}
}
},
"UnityEnvironment.step": {
"total": 0.02716755199992349,
"count": 1,
"is_parallel": true,
"self": 0.00033922199986591295,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00019188700002814585,
"count": 1,
"is_parallel": true,
"self": 0.00019188700002814585
},
"communicator.exchange": {
"total": 0.025847496999972464,
"count": 1,
"is_parallel": true,
"self": 0.025847496999972464
},
"steps_from_proto": {
"total": 0.000788946000056967,
"count": 1,
"is_parallel": true,
"self": 0.0002749049999692943,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005140410000876727,
"count": 2,
"is_parallel": true,
"self": 0.0005140410000876727
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1213.363737286093,
"count": 233214,
"is_parallel": true,
"self": 34.963868368045496,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 76.43656320104185,
"count": 233214,
"is_parallel": true,
"self": 76.43656320104185
},
"communicator.exchange": {
"total": 1008.1058482859477,
"count": 233214,
"is_parallel": true,
"self": 1008.1058482859477
},
"steps_from_proto": {
"total": 93.85745743105804,
"count": 233214,
"is_parallel": true,
"self": 38.54955233610963,
"children": {
"_process_rank_one_or_two_observation": {
"total": 55.30790509494841,
"count": 466428,
"is_parallel": true,
"self": 55.30790509494841
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 480.8904980480379,
"count": 233215,
"self": 5.945233951227692,
"children": {
"process_trajectory": {
"total": 153.4257775728105,
"count": 233215,
"self": 152.18474056881053,
"children": {
"RLTrainer._checkpoint": {
"total": 1.2410370039999634,
"count": 10,
"self": 1.2410370039999634
}
}
},
"_update_policy": {
"total": 321.5194865239997,
"count": 97,
"self": 268.2412857119938,
"children": {
"TorchPPOOptimizer.update": {
"total": 53.278200812005934,
"count": 2910,
"self": 53.278200812005934
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.089996299531776e-07,
"count": 1,
"self": 8.089996299531776e-07
},
"TrainerController._save_models": {
"total": 0.1166857829998662,
"count": 1,
"self": 0.002079122999930405,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1146066599999358,
"count": 1,
"self": 0.1146066599999358
}
}
}
}
}
}
}