ppo-Huggy / run_logs / timers.json
{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4045294523239136,
"min": 1.4045294523239136,
"max": 1.4285621643066406,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 71136.609375,
"min": 69652.15625,
"max": 76074.984375,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 71.87463556851311,
"min": 71.87463556851311,
"max": 376.53731343283584,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49306.0,
"min": 48905.0,
"max": 50456.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999937.0,
"min": 49991.0,
"max": 1999937.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999937.0,
"min": 49991.0,
"max": 1999937.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.5103981494903564,
"min": 0.07892615348100662,
"max": 2.5278217792510986,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1722.133056640625,
"min": 10.497178077697754,
"max": 1722.133056640625,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.8975014299067396,
"min": 1.8497056477051927,
"max": 4.005753934487233,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2673.6859809160233,
"min": 246.01085114479065,
"max": 2680.0980911254883,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.8975014299067396,
"min": 1.8497056477051927,
"max": 4.005753934487233,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2673.6859809160233,
"min": 246.01085114479065,
"max": 2680.0980911254883,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.016178777139349326,
"min": 0.012990814041889582,
"max": 0.0212116534501547,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.04853633141804797,
"min": 0.025981628083779164,
"max": 0.05631081961716215,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05836854167282581,
"min": 0.022994611443330842,
"max": 0.06169581202169259,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.17510562501847743,
"min": 0.045989222886661685,
"max": 0.18508743606507777,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.869598710166671e-06,
"min": 3.869598710166671e-06,
"max": 0.00029536530154489996,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.1608796130500012e-05,
"min": 1.1608796130500012e-05,
"max": 0.0008439984186671999,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10128983333333332,
"min": 0.10128983333333332,
"max": 0.1984551,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30386949999999996,
"min": 0.20772089999999999,
"max": 0.5813328000000001,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 7.43626833333334e-05,
"min": 7.43626833333334e-05,
"max": 0.00492290949,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.0002230880500000002,
"min": 0.0002230880500000002,
"max": 0.014068506720000002,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1670497529",
"python_version": "3.8.15 (default, Oct 12 2022, 19:14:39) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1670499775"
},
"total": 2245.989673639,
"count": 1,
"self": 0.39601139400019747,
"children": {
"run_training.setup": {
"total": 0.19334831499998018,
"count": 1,
"self": 0.19334831499998018
},
"TrainerController.start_learning": {
"total": 2245.40031393,
"count": 1,
"self": 3.8586163590639444,
"children": {
"TrainerController._reset_env": {
"total": 11.298027630999968,
"count": 1,
"self": 11.298027630999968
},
"TrainerController.advance": {
"total": 2230.123081426936,
"count": 233463,
"self": 4.186071180919498,
"children": {
"env_step": {
"total": 1745.4875454790454,
"count": 233463,
"self": 1468.0871325101173,
"children": {
"SubprocessEnvManager._take_step": {
"total": 274.7790398230051,
"count": 233463,
"self": 14.275562855998771,
"children": {
"TorchPolicy.evaluate": {
"total": 260.50347696700635,
"count": 222938,
"self": 65.3585321010226,
"children": {
"TorchPolicy.sample_actions": {
"total": 195.14494486598375,
"count": 222938,
"self": 195.14494486598375
}
}
}
}
},
"workers": {
"total": 2.6213731459229734,
"count": 233463,
"self": 0.0,
"children": {
"worker_root": {
"total": 2237.3854633468973,
"count": 233463,
"is_parallel": true,
"self": 1029.7785825289043,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0027987020000068696,
"count": 1,
"is_parallel": true,
"self": 0.0003562830000305439,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0024424189999763257,
"count": 2,
"is_parallel": true,
"self": 0.0024424189999763257
}
}
},
"UnityEnvironment.step": {
"total": 0.02776963200000182,
"count": 1,
"is_parallel": true,
"self": 0.00026455599993369106,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0001955960000259438,
"count": 1,
"is_parallel": true,
"self": 0.0001955960000259438
},
"communicator.exchange": {
"total": 0.026628391000031115,
"count": 1,
"is_parallel": true,
"self": 0.026628391000031115
},
"steps_from_proto": {
"total": 0.0006810890000110703,
"count": 1,
"is_parallel": true,
"self": 0.00022856599997567173,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0004525230000353986,
"count": 2,
"is_parallel": true,
"self": 0.0004525230000353986
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1207.606880817993,
"count": 233462,
"is_parallel": true,
"self": 35.03207927398557,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 75.77695432703808,
"count": 233462,
"is_parallel": true,
"self": 75.77695432703808
},
"communicator.exchange": {
"total": 1003.4429934839809,
"count": 233462,
"is_parallel": true,
"self": 1003.4429934839809
},
"steps_from_proto": {
"total": 93.35485373298843,
"count": 233462,
"is_parallel": true,
"self": 38.035066385970026,
"children": {
"_process_rank_one_or_two_observation": {
"total": 55.319787347018405,
"count": 466924,
"is_parallel": true,
"self": 55.319787347018405
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 480.44946476697123,
"count": 233463,
"self": 5.947910854007603,
"children": {
"process_trajectory": {
"total": 153.91322432096354,
"count": 233463,
"self": 153.42061308496307,
"children": {
"RLTrainer._checkpoint": {
"total": 0.49261123600047085,
"count": 4,
"self": 0.49261123600047085
}
}
},
"_update_policy": {
"total": 320.5883295920001,
"count": 97,
"self": 267.17715558299824,
"children": {
"TorchPPOOptimizer.update": {
"total": 53.411174009001854,
"count": 2910,
"self": 53.411174009001854
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.959999260900076e-07,
"count": 1,
"self": 9.959999260900076e-07
},
"TrainerController._save_models": {
"total": 0.12058751699987624,
"count": 1,
"self": 0.002217868000116141,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1183696489997601,
"count": 1,
"self": 0.1183696489997601
}
}
}
}
}
}
}