{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4032610654830933,
"min": 1.4032610654830933,
"max": 1.4290275573730469,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 70198.1328125,
"min": 69267.265625,
"max": 77004.5625,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 91.79005524861878,
"min": 80.2390243902439,
"max": 402.85483870967744,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49842.0,
"min": 48710.0,
"max": 50082.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999857.0,
"min": 49941.0,
"max": 1999857.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999857.0,
"min": 49941.0,
"max": 1999857.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.441159248352051,
"min": 0.0977514311671257,
"max": 2.485295295715332,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1325.5494384765625,
"min": 12.023426055908203,
"max": 1506.4666748046875,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.763718887038433,
"min": 1.8917136434617081,
"max": 4.033910231989694,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2043.699355661869,
"min": 232.6807781457901,
"max": 2383.1694727540016,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.763718887038433,
"min": 1.8917136434617081,
"max": 4.033910231989694,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2043.699355661869,
"min": 232.6807781457901,
"max": 2383.1694727540016,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.0186678880769048,
"min": 0.013339127875709286,
"max": 0.020879148038996695,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.056003664230714394,
"min": 0.02667825575141857,
"max": 0.06109786165834521,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05414946348302894,
"min": 0.02077243089055022,
"max": 0.06433689755698045,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.16244839044908682,
"min": 0.04154486178110044,
"max": 0.1846834037452936,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.548248817283328e-06,
"min": 3.548248817283328e-06,
"max": 0.000295285426571525,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.0644746451849984e-05,
"min": 1.0644746451849984e-05,
"max": 0.0008437501687499499,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10118271666666667,
"min": 0.10118271666666667,
"max": 0.19842847500000002,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30354815,
"min": 0.20751894999999998,
"max": 0.58125005,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.901756166666658e-05,
"min": 6.901756166666658e-05,
"max": 0.0049215809025,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00020705268499999975,
"min": 0.00020705268499999975,
"max": 0.014064377494999999,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1686659753",
"python_version": "3.10.12 (main, Jun 7 2023, 12:45:35) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1686662066"
},
"total": 2313.373164092,
"count": 1,
"self": 0.438033945999905,
"children": {
"run_training.setup": {
"total": 0.03903810299993893,
"count": 1,
"self": 0.03903810299993893
},
"TrainerController.start_learning": {
"total": 2312.896092043,
"count": 1,
"self": 4.211699368994687,
"children": {
"TrainerController._reset_env": {
"total": 4.606654269999922,
"count": 1,
"self": 4.606654269999922
},
"TrainerController.advance": {
"total": 2303.9636914770053,
"count": 232920,
"self": 4.266338775003078,
"children": {
"env_step": {
"total": 1795.3808481560159,
"count": 232920,
"self": 1514.618867446131,
"children": {
"SubprocessEnvManager._take_step": {
"total": 278.139903951969,
"count": 232920,
"self": 15.722329074876711,
"children": {
"TorchPolicy.evaluate": {
"total": 262.4175748770923,
"count": 223015,
"self": 262.4175748770923
}
}
},
"workers": {
"total": 2.6220767579158064,
"count": 232920,
"self": 0.0,
"children": {
"worker_root": {
"total": 2305.6234232060337,
"count": 232920,
"is_parallel": true,
"self": 1065.5834090800558,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0010883649999868794,
"count": 1,
"is_parallel": true,
"self": 0.00031280499990771204,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0007755600000791674,
"count": 2,
"is_parallel": true,
"self": 0.0007755600000791674
}
}
},
"UnityEnvironment.step": {
"total": 0.06474586000001636,
"count": 1,
"is_parallel": true,
"self": 0.0005197810000936443,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0002116340000384298,
"count": 1,
"is_parallel": true,
"self": 0.0002116340000384298
},
"communicator.exchange": {
"total": 0.06324305399994046,
"count": 1,
"is_parallel": true,
"self": 0.06324305399994046
},
"steps_from_proto": {
"total": 0.0007713909999438329,
"count": 1,
"is_parallel": true,
"self": 0.0002481069999475949,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.000523283999996238,
"count": 2,
"is_parallel": true,
"self": 0.000523283999996238
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1240.040014125978,
"count": 232919,
"is_parallel": true,
"self": 37.64385302117353,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 75.39359862591584,
"count": 232919,
"is_parallel": true,
"self": 75.39359862591584
},
"communicator.exchange": {
"total": 1036.232974019892,
"count": 232919,
"is_parallel": true,
"self": 1036.232974019892
},
"steps_from_proto": {
"total": 90.76958845899662,
"count": 232919,
"is_parallel": true,
"self": 32.92648769203083,
"children": {
"_process_rank_one_or_two_observation": {
"total": 57.84310076696579,
"count": 465838,
"is_parallel": true,
"self": 57.84310076696579
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 504.3165045459864,
"count": 232920,
"self": 6.398308378989668,
"children": {
"process_trajectory": {
"total": 131.8979222649981,
"count": 232920,
"self": 130.67406687199832,
"children": {
"RLTrainer._checkpoint": {
"total": 1.2238553929997806,
"count": 10,
"self": 1.2238553929997806
}
}
},
"_update_policy": {
"total": 366.0202739019986,
"count": 97,
"self": 308.72440478299427,
"children": {
"TorchPPOOptimizer.update": {
"total": 57.29586911900435,
"count": 2910,
"self": 57.29586911900435
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.479999789618887e-07,
"count": 1,
"self": 9.479999789618887e-07
},
"TrainerController._save_models": {
"total": 0.11404597899991131,
"count": 1,
"self": 0.0022234540001591085,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1118225249997522,
"count": 1,
"self": 0.1118225249997522
}
}
}
}
}
}
}