ppo-Huggy / run_logs / timers.json
{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4106389284133911,
"min": 1.4106389284133911,
"max": 1.426804542541504,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 70651.8515625,
"min": 69287.0859375,
"max": 77676.765625,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 76.6687402799378,
"min": 69.7758865248227,
"max": 416.3471074380165,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49298.0,
"min": 49192.0,
"max": 50378.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999955.0,
"min": 49749.0,
"max": 1999955.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999955.0,
"min": 49749.0,
"max": 1999955.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.5191328525543213,
"min": -0.0357179157435894,
"max": 2.5198822021484375,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1619.802490234375,
"min": -4.286149978637695,
"max": 1764.00244140625,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.923096710471306,
"min": 1.8294959542651972,
"max": 4.022979872998285,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2522.55118483305,
"min": 219.53951451182365,
"max": 2731.1769699454308,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.923096710471306,
"min": 1.8294959542651972,
"max": 4.022979872998285,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2522.55118483305,
"min": 219.53951451182365,
"max": 2731.1769699454308,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.015898065453964386,
"min": 0.012524290055504633,
"max": 0.020067471600486896,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.04769419636189316,
"min": 0.025048580111009265,
"max": 0.059220782085321846,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.06197195318010118,
"min": 0.02519139740616083,
"max": 0.067307034569482,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.18591585954030354,
"min": 0.05038279481232166,
"max": 0.18758839045961698,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.862248712616666e-06,
"min": 3.862248712616666e-06,
"max": 0.000295357876547375,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.1586746137849997e-05,
"min": 1.1586746137849997e-05,
"max": 0.0008440738686420499,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10128738333333336,
"min": 0.10128738333333336,
"max": 0.19845262500000005,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30386215000000005,
"min": 0.20771414999999996,
"max": 0.5813579500000001,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 7.424042833333333e-05,
"min": 7.424042833333333e-05,
"max": 0.004922785987500001,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00022272128499999997,
"min": 0.00022272128499999997,
"max": 0.014069761705000003,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1694631388",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1694633789"
},
"total": 2400.4736321679998,
"count": 1,
"self": 0.4450837090003006,
"children": {
"run_training.setup": {
"total": 0.04457961899993279,
"count": 1,
"self": 0.04457961899993279
},
"TrainerController.start_learning": {
"total": 2399.9839688399998,
"count": 1,
"self": 4.309614270961447,
"children": {
"TrainerController._reset_env": {
"total": 4.1691406859999915,
"count": 1,
"self": 4.1691406859999915
},
"TrainerController.advance": {
"total": 2391.3846699190385,
"count": 233256,
"self": 4.443140151075113,
"children": {
"env_step": {
"total": 1843.4796532380246,
"count": 233256,
"self": 1557.7063651769479,
"children": {
"SubprocessEnvManager._take_step": {
"total": 282.89957209206875,
"count": 233256,
"self": 16.068604788072435,
"children": {
"TorchPolicy.evaluate": {
"total": 266.8309673039963,
"count": 222932,
"self": 266.8309673039963
}
}
},
"workers": {
"total": 2.8737159690080034,
"count": 233256,
"self": 0.0,
"children": {
"worker_root": {
"total": 2392.5274589639835,
"count": 233256,
"is_parallel": true,
"self": 1119.0837192899758,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0009748259999469155,
"count": 1,
"is_parallel": true,
"self": 0.00026606299991271953,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.000708763000034196,
"count": 2,
"is_parallel": true,
"self": 0.000708763000034196
}
}
},
"UnityEnvironment.step": {
"total": 0.02871428699995704,
"count": 1,
"is_parallel": true,
"self": 0.0003301669999018486,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.000237093000009736,
"count": 1,
"is_parallel": true,
"self": 0.000237093000009736
},
"communicator.exchange": {
"total": 0.02741675499999019,
"count": 1,
"is_parallel": true,
"self": 0.02741675499999019
},
"steps_from_proto": {
"total": 0.0007302720000552654,
"count": 1,
"is_parallel": true,
"self": 0.00020444300002964155,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005258290000256238,
"count": 2,
"is_parallel": true,
"self": 0.0005258290000256238
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1273.4437396740077,
"count": 233255,
"is_parallel": true,
"self": 39.77294646813857,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 80.42938483302203,
"count": 233255,
"is_parallel": true,
"self": 80.42938483302203
},
"communicator.exchange": {
"total": 1055.887626285921,
"count": 233255,
"is_parallel": true,
"self": 1055.887626285921
},
"steps_from_proto": {
"total": 97.35378208692623,
"count": 233255,
"is_parallel": true,
"self": 34.642037725930436,
"children": {
"_process_rank_one_or_two_observation": {
"total": 62.711744360995795,
"count": 466510,
"is_parallel": true,
"self": 62.711744360995795
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 543.4618765299388,
"count": 233256,
"self": 6.480220414048404,
"children": {
"process_trajectory": {
"total": 139.95562243688858,
"count": 233256,
"self": 138.58186050888878,
"children": {
"RLTrainer._checkpoint": {
"total": 1.3737619279997944,
"count": 10,
"self": 1.3737619279997944
}
}
},
"_update_policy": {
"total": 397.0260336790018,
"count": 97,
"self": 338.31661259099155,
"children": {
"TorchPPOOptimizer.update": {
"total": 58.70942108801023,
"count": 2910,
"self": 58.70942108801023
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.099999260797631e-07,
"count": 1,
"self": 9.099999260797631e-07
},
"TrainerController._save_models": {
"total": 0.12054305399988152,
"count": 1,
"self": 0.0019632549997368187,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1185797990001447,
"count": 1,
"self": 0.1185797990001447
}
}
}
}
}
}
}