ppo-Huggy / run_logs / timers.json
{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4117953777313232,
"min": 1.4117953777313232,
"max": 1.4292240142822266,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 71155.8984375,
"min": 68747.7734375,
"max": 78076.046875,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 89.27436823104694,
"min": 83.72602739726027,
"max": 399.272,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49458.0,
"min": 48896.0,
"max": 49975.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999957.0,
"min": 49703.0,
"max": 1999957.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999957.0,
"min": 49703.0,
"max": 1999957.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.410107374191284,
"min": 0.008329128846526146,
"max": 2.4224905967712402,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1335.199462890625,
"min": 1.0328119993209839,
"max": 1408.711669921875,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.7878802127140956,
"min": 2.051816166889283,
"max": 3.9824150996823464,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2098.485637843609,
"min": 254.4252046942711,
"max": 2174.1710757017136,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.7878802127140956,
"min": 2.051816166889283,
"max": 3.9824150996823464,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2098.485637843609,
"min": 254.4252046942711,
"max": 2174.1710757017136,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.01912602135966558,
"min": 0.013348241238458689,
"max": 0.02228242124328972,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.05737806407899673,
"min": 0.02882778675411828,
"max": 0.05737806407899673,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.053501552633113326,
"min": 0.024023202868799366,
"max": 0.0635108803709348,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.16050465789934,
"min": 0.04804640573759873,
"max": 0.17735502372185388,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.1404489532166702e-06,
"min": 3.1404489532166702e-06,
"max": 0.00029537077654307495,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 9.421346859650011e-06,
"min": 9.421346859650011e-06,
"max": 0.0008441803686065499,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10104678333333333,
"min": 0.10104678333333333,
"max": 0.19845692499999998,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30314035,
"min": 0.20726460000000008,
"max": 0.5813934500000001,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.223448833333338e-05,
"min": 6.223448833333338e-05,
"max": 0.0049230005575,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00018670346500000013,
"min": 0.00018670346500000013,
"max": 0.014071533155,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1693804203",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1693806824"
},
"total": 2621.378027005,
"count": 1,
"self": 0.8072679270003391,
"children": {
"run_training.setup": {
"total": 0.04130490000000009,
"count": 1,
"self": 0.04130490000000009
},
"TrainerController.start_learning": {
"total": 2620.529454178,
"count": 1,
"self": 4.791731223012448,
"children": {
"TrainerController._reset_env": {
"total": 4.121417148000035,
"count": 1,
"self": 4.121417148000035
},
"TrainerController.advance": {
"total": 2611.415566598987,
"count": 232498,
"self": 4.917226323979776,
"children": {
"env_step": {
"total": 2041.060817612031,
"count": 232498,
"self": 1727.1999915810902,
"children": {
"SubprocessEnvManager._take_step": {
"total": 310.67250610596784,
"count": 232498,
"self": 18.20327888697136,
"children": {
"TorchPolicy.evaluate": {
"total": 292.4692272189965,
"count": 223038,
"self": 292.4692272189965
}
}
},
"workers": {
"total": 3.188319924972973,
"count": 232498,
"self": 0.0,
"children": {
"worker_root": {
"total": 2612.3949553820844,
"count": 232498,
"is_parallel": true,
"self": 1202.1212196921344,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0009873460000449086,
"count": 1,
"is_parallel": true,
"self": 0.00024776900005463176,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0007395769999902768,
"count": 2,
"is_parallel": true,
"self": 0.0007395769999902768
}
}
},
"UnityEnvironment.step": {
"total": 0.031341634999989765,
"count": 1,
"is_parallel": true,
"self": 0.00031392400001095666,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0002453349999882448,
"count": 1,
"is_parallel": true,
"self": 0.0002453349999882448
},
"communicator.exchange": {
"total": 0.030005758000015703,
"count": 1,
"is_parallel": true,
"self": 0.030005758000015703
},
"steps_from_proto": {
"total": 0.0007766179999748601,
"count": 1,
"is_parallel": true,
"self": 0.00021285599996190285,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005637620000129573,
"count": 2,
"is_parallel": true,
"self": 0.0005637620000129573
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1410.27373568995,
"count": 232497,
"is_parallel": true,
"self": 41.5612525798897,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 89.98734395198517,
"count": 232497,
"is_parallel": true,
"self": 89.98734395198517
},
"communicator.exchange": {
"total": 1174.220520680011,
"count": 232497,
"is_parallel": true,
"self": 1174.220520680011
},
"steps_from_proto": {
"total": 104.50461847806395,
"count": 232497,
"is_parallel": true,
"self": 39.712036839074926,
"children": {
"_process_rank_one_or_two_observation": {
"total": 64.79258163898902,
"count": 464994,
"is_parallel": true,
"self": 64.79258163898902
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 565.437522662976,
"count": 232498,
"self": 7.082635707933491,
"children": {
"process_trajectory": {
"total": 149.1379193290415,
"count": 232498,
"self": 147.52230326004207,
"children": {
"RLTrainer._checkpoint": {
"total": 1.6156160689994294,
"count": 10,
"self": 1.6156160689994294
}
}
},
"_update_policy": {
"total": 409.21696762600095,
"count": 97,
"self": 348.1799298940047,
"children": {
"TorchPPOOptimizer.update": {
"total": 61.037037731996236,
"count": 2910,
"self": 61.037037731996236
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.449000137654366e-06,
"count": 1,
"self": 1.449000137654366e-06
},
"TrainerController._save_models": {
"total": 0.20073775900027613,
"count": 1,
"self": 0.002926096000464895,
"children": {
"RLTrainer._checkpoint": {
"total": 0.19781166299981123,
"count": 1,
"self": 0.19781166299981123
}
}
}
}
}
}
}
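
The JSON above is the timer report that ML-Agents writes at the end of a run: "gauges" holds per-metric summaries (value, min, max, count over the 40 recorded summary points), "metadata" records the run configuration, and the nested "children" tree breaks the total wall-clock time down per call site. Below is a minimal sketch of how one might inspect it, assuming the file is saved locally as run_logs/timers.json; the path and the fields chosen for printing are illustrative, not part of the run itself.

# Minimal sketch: load the ML-Agents timer report and print a few headline numbers.
# Assumes the JSON above is stored at run_logs/timers.json.
import json

with open("run_logs/timers.json") as f:
    timers = json.load(f)

# "gauges" holds per-metric summaries (value/min/max/count) recorded during training.
reward = timers["gauges"]["Huggy.Environment.CumulativeReward.mean"]
print(f"Final mean cumulative reward: {reward['value']:.3f} "
      f"(min {reward['min']:.3f}, max {reward['max']:.3f}, {reward['count']} summaries)")

# The top-level timer tree records wall-clock time per call site.
print(f"Total wall-clock time: {timers['total']:.1f} s")
env_step = (timers["children"]["TrainerController.start_learning"]
            ["children"]["TrainerController.advance"]["children"]["env_step"])
print(f"Time in env_step: {env_step['total']:.1f} s over {env_step['count']} calls")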