ppo-Huggy/run_logs/timers.json
{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4061198234558105,
"min": 1.4061198234558105,
"max": 1.4284367561340332,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 71899.125,
"min": 68950.0,
"max": 77269.546875,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 73.47767857142857,
"min": 73.47767857142857,
"max": 378.2121212121212,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49377.0,
"min": 48811.0,
"max": 49924.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999984.0,
"min": 49914.0,
"max": 1999984.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999984.0,
"min": 49914.0,
"max": 1999984.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.473170042037964,
"min": 0.0383739098906517,
"max": 2.5001471042633057,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1661.97021484375,
"min": 5.026982307434082,
"max": 1661.97021484375,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.8346581809400093,
"min": 1.6011361959781356,
"max": 3.935142706053176,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2576.8902975916862,
"min": 209.74884167313576,
"max": 2576.8902975916862,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.8346581809400093,
"min": 1.6011361959781356,
"max": 3.935142706053176,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2576.8902975916862,
"min": 209.74884167313576,
"max": 2576.8902975916862,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.014499563197993363,
"min": 0.013416278153696719,
"max": 0.019477857386258773,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.04349868959398009,
"min": 0.026832556307393438,
"max": 0.05678281305833177,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05704939833117856,
"min": 0.02263005571439862,
"max": 0.061614136397838595,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.17114819499353567,
"min": 0.04526011142879724,
"max": 0.1752358626574278,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.7237987587666734e-06,
"min": 3.7237987587666734e-06,
"max": 0.0002953677015441,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.117139627630002e-05,
"min": 1.117139627630002e-05,
"max": 0.00084420256859915,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10124123333333335,
"min": 0.10124123333333335,
"max": 0.19845590000000007,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30372370000000004,
"min": 0.20764875000000005,
"max": 0.58140085,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 7.193754333333348e-05,
"min": 7.193754333333348e-05,
"max": 0.00492294941,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00021581263000000045,
"min": 0.00021581263000000045,
"max": 0.014071902415000002,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1683219729",
"python_version": "3.10.11 (main, Apr 5 2023, 14:15:10) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1683222231"
},
"total": 2501.6617155040003,
"count": 1,
"self": 0.4479507330006527,
"children": {
"run_training.setup": {
"total": 0.054380058000106146,
"count": 1,
"self": 0.054380058000106146
},
"TrainerController.start_learning": {
"total": 2501.159384713,
"count": 1,
"self": 4.546831835068588,
"children": {
"TrainerController._reset_env": {
"total": 4.546329196000215,
"count": 1,
"self": 4.546329196000215
},
"TrainerController.advance": {
"total": 2491.944039337931,
"count": 233225,
"self": 4.723298957960651,
"children": {
"env_step": {
"total": 1943.9120344379846,
"count": 233225,
"self": 1646.6185201199391,
"children": {
"SubprocessEnvManager._take_step": {
"total": 294.4022826160715,
"count": 233225,
"self": 17.465657763992112,
"children": {
"TorchPolicy.evaluate": {
"total": 276.9366248520794,
"count": 223027,
"self": 276.9366248520794
}
}
},
"workers": {
"total": 2.891231701973993,
"count": 233225,
"self": 0.0,
"children": {
"worker_root": {
"total": 2492.716405847929,
"count": 233225,
"is_parallel": true,
"self": 1146.5294656109088,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0008931529998790211,
"count": 1,
"is_parallel": true,
"self": 0.00028339999971649377,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0006097530001625273,
"count": 2,
"is_parallel": true,
"self": 0.0006097530001625273
}
}
},
"UnityEnvironment.step": {
"total": 0.02997302999983731,
"count": 1,
"is_parallel": true,
"self": 0.0003156449995458388,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00021884900002078211,
"count": 1,
"is_parallel": true,
"self": 0.00021884900002078211
},
"communicator.exchange": {
"total": 0.028716271000121196,
"count": 1,
"is_parallel": true,
"self": 0.028716271000121196
},
"steps_from_proto": {
"total": 0.0007222650001494912,
"count": 1,
"is_parallel": true,
"self": 0.00021623200018439093,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005060329999651003,
"count": 2,
"is_parallel": true,
"self": 0.0005060329999651003
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1346.18694023702,
"count": 233224,
"is_parallel": true,
"self": 39.29006291916744,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 83.89633852494126,
"count": 233224,
"is_parallel": true,
"self": 83.89633852494126
},
"communicator.exchange": {
"total": 1126.5806700549356,
"count": 233224,
"is_parallel": true,
"self": 1126.5806700549356
},
"steps_from_proto": {
"total": 96.41986873797578,
"count": 233224,
"is_parallel": true,
"self": 37.343212051860064,
"children": {
"_process_rank_one_or_two_observation": {
"total": 59.07665668611571,
"count": 466448,
"is_parallel": true,
"self": 59.07665668611571
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 543.3087059419854,
"count": 233225,
"self": 6.794888253185718,
"children": {
"process_trajectory": {
"total": 143.7713672268021,
"count": 233225,
"self": 142.2888240198006,
"children": {
"RLTrainer._checkpoint": {
"total": 1.4825432070015268,
"count": 10,
"self": 1.4825432070015268
}
}
},
"_update_policy": {
"total": 392.7424504619976,
"count": 97,
"self": 333.7744550440011,
"children": {
"TorchPPOOptimizer.update": {
"total": 58.96799541799646,
"count": 2910,
"self": 58.96799541799646
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.599998520570807e-07,
"count": 1,
"self": 9.599998520570807e-07
},
"TrainerController._save_models": {
"total": 0.12218338399998174,
"count": 1,
"self": 0.0022434610000345856,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11993992299994716,
"count": 1,
"self": 0.11993992299994716
}
}
}
}
}
}
}
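
For reference, a minimal sketch of how this ML-Agents timer log can be inspected with Python's standard json module. The file path is an assumption (the log is shown here under run_logs/timers.json); the keys used ("gauges", "value", "min", "max", "count", "total") all appear in the JSON above.

import json

# Load the timer log produced by mlagents-learn (path assumed from this repo's layout).
with open("run_logs/timers.json") as f:
    timers = json.load(f)

# Print each recorded training gauge with its running value, min, max, and sample count.
for name, gauge in timers["gauges"].items():
    print(f"{name}: value={gauge['value']:.4f} "
          f"(min={gauge['min']:.4f}, max={gauge['max']:.4f}, count={gauge['count']})")

# The root node also records the total wall-clock time of the run in seconds.
print(f"Total wall-clock time: {timers['total']:.1f} s")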