{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4072438478469849,
"min": 1.4072438478469849,
"max": 1.4305661916732788,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 69517.84375,
"min": 67944.390625,
"max": 76364.3828125,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 82.0514950166113,
"min": 78.9376,
"max": 399.8015873015873,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49395.0,
"min": 48756.0,
"max": 50375.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999932.0,
"min": 49962.0,
"max": 1999932.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999932.0,
"min": 49962.0,
"max": 1999932.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.4706006050109863,
"min": 0.02737303264439106,
"max": 2.504894733428955,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1487.301513671875,
"min": 3.4216291904449463,
"max": 1541.3978271484375,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.8216871867742253,
"min": 1.6864384317398071,
"max": 3.9473415325288057,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2300.6556864380836,
"min": 210.8048039674759,
"max": 2390.3517414331436,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.8216871867742253,
"min": 1.6864384317398071,
"max": 3.9473415325288057,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2300.6556864380836,
"min": 210.8048039674759,
"max": 2390.3517414331436,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.017777266304862375,
"min": 0.01271034315044138,
"max": 0.019301300713878946,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.03555453260972475,
"min": 0.02542068630088276,
"max": 0.05790390214163684,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05440299374361833,
"min": 0.022962685022503135,
"max": 0.06759498094518979,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.10880598748723666,
"min": 0.04592537004500627,
"max": 0.20278494283556936,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 4.622048459349996e-06,
"min": 4.622048459349996e-06,
"max": 0.00029533185155605,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 9.244096918699992e-06,
"min": 9.244096918699992e-06,
"max": 0.0008436417187860999,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10154065000000001,
"min": 0.10154065000000001,
"max": 0.19844394999999995,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.20308130000000002,
"min": 0.20308130000000002,
"max": 0.5812139000000002,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 8.687843499999995e-05,
"min": 8.687843499999995e-05,
"max": 0.004922353104999999,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.0001737568699999999,
"min": 0.0001737568699999999,
"max": 0.014062573610000004,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1695659598",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1695662122"
},
"total": 2524.825836667,
"count": 1,
"self": 0.44119951100037724,
"children": {
"run_training.setup": {
"total": 0.041531860999953096,
"count": 1,
"self": 0.041531860999953096
},
"TrainerController.start_learning": {
"total": 2524.343105295,
"count": 1,
"self": 4.607358351107905,
"children": {
"TrainerController._reset_env": {
"total": 4.162296733999938,
"count": 1,
"self": 4.162296733999938
},
"TrainerController.advance": {
"total": 2515.4492141968926,
"count": 232459,
"self": 5.025110792808391,
"children": {
"env_step": {
"total": 1958.329247416164,
"count": 232459,
"self": 1654.0913313549734,
"children": {
"SubprocessEnvManager._take_step": {
"total": 301.24569701602104,
"count": 232459,
"self": 17.42886377809532,
"children": {
"TorchPolicy.evaluate": {
"total": 283.8168332379257,
"count": 222908,
"self": 283.8168332379257
}
}
},
"workers": {
"total": 2.992219045169577,
"count": 232459,
"self": 0.0,
"children": {
"worker_root": {
"total": 2516.644214177966,
"count": 232459,
"is_parallel": true,
"self": 1164.2437734780342,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.000916170999971655,
"count": 1,
"is_parallel": true,
"self": 0.00029303099995559023,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0006231400000160647,
"count": 2,
"is_parallel": true,
"self": 0.0006231400000160647
}
}
},
"UnityEnvironment.step": {
"total": 0.03490220499998031,
"count": 1,
"is_parallel": true,
"self": 0.00032854899995982123,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00023933199997827614,
"count": 1,
"is_parallel": true,
"self": 0.00023933199997827614
},
"communicator.exchange": {
"total": 0.03355499700001019,
"count": 1,
"is_parallel": true,
"self": 0.03355499700001019
},
"steps_from_proto": {
"total": 0.0007793270000320263,
"count": 1,
"is_parallel": true,
"self": 0.00022807599998486694,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005512510000471593,
"count": 2,
"is_parallel": true,
"self": 0.0005512510000471593
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1352.400440699932,
"count": 232458,
"is_parallel": true,
"self": 40.28480192990992,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 88.49965090907199,
"count": 232458,
"is_parallel": true,
"self": 88.49965090907199
},
"communicator.exchange": {
"total": 1120.6028508439035,
"count": 232458,
"is_parallel": true,
"self": 1120.6028508439035
},
"steps_from_proto": {
"total": 103.01313701704657,
"count": 232458,
"is_parallel": true,
"self": 39.321549500006086,
"children": {
"_process_rank_one_or_two_observation": {
"total": 63.69158751704049,
"count": 464916,
"is_parallel": true,
"self": 63.69158751704049
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 552.0948559879201,
"count": 232459,
"self": 6.711204897933044,
"children": {
"process_trajectory": {
"total": 144.60524722698676,
"count": 232459,
"self": 143.24983020098705,
"children": {
"RLTrainer._checkpoint": {
"total": 1.3554170259997136,
"count": 10,
"self": 1.3554170259997136
}
}
},
"_update_policy": {
"total": 400.77840386300034,
"count": 96,
"self": 340.8263049410075,
"children": {
"TorchPPOOptimizer.update": {
"total": 59.95209892199284,
"count": 2880,
"self": 59.95209892199284
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.1429997357481625e-06,
"count": 1,
"self": 1.1429997357481625e-06
},
"TrainerController._save_models": {
"total": 0.12423486999978195,
"count": 1,
"self": 0.0019274460000815452,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1223074239997004,
"count": 1,
"self": 0.1223074239997004
}
}
}
}
}
}
}