ppo-Huggy / run_logs / timers.json
{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4028698205947876,
"min": 1.4028698205947876,
"max": 1.4244840145111084,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 71251.7578125,
"min": 68231.296875,
"max": 76737.6640625,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 82.53088480801335,
"min": 82.53088480801335,
"max": 401.696,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49436.0,
"min": 49202.0,
"max": 50212.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999937.0,
"min": 49867.0,
"max": 1999937.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999937.0,
"min": 49867.0,
"max": 1999937.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.399496555328369,
"min": -0.10597319900989532,
"max": 2.4546597003936768,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1437.2984619140625,
"min": -13.140676498413086,
"max": 1453.99365234375,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.778512103529725,
"min": 1.618101525931589,
"max": 3.883151812967679,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2263.328750014305,
"min": 200.64458921551704,
"max": 2307.165181219578,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.778512103529725,
"min": 1.618101525931589,
"max": 3.883151812967679,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2263.328750014305,
"min": 200.64458921551704,
"max": 2307.165181219578,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.014808915089169103,
"min": 0.013778477982517263,
"max": 0.02357511678079997,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.04442674526750731,
"min": 0.027556955965034526,
"max": 0.05744188116368605,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.06457541547715663,
"min": 0.022712051247557007,
"max": 0.06457541547715663,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.1937262464314699,
"min": 0.04542410249511401,
"max": 0.1937262464314699,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.3436488854833283e-06,
"min": 3.3436488854833283e-06,
"max": 0.00029533822655392495,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.0030946656449985e-05,
"min": 1.0030946656449985e-05,
"max": 0.0008441352186215999,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10111451666666664,
"min": 0.10111451666666664,
"max": 0.19844607500000006,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.3033435499999999,
"min": 0.2073529,
"max": 0.5813784000000001,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.56143816666666e-05,
"min": 6.56143816666666e-05,
"max": 0.0049224591425,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00019684314499999979,
"min": 0.00019684314499999979,
"max": 0.014070782160000002,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1686302081",
"python_version": "3.10.12 (main, Jun 7 2023, 12:45:35) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1686304636"
},
"total": 2555.0046473519997,
"count": 1,
"self": 0.84094374399956,
"children": {
"run_training.setup": {
"total": 0.04477935999989313,
"count": 1,
"self": 0.04477935999989313
},
"TrainerController.start_learning": {
"total": 2554.1189242480004,
"count": 1,
"self": 4.717427193979802,
"children": {
"TrainerController._reset_env": {
"total": 4.759873728999992,
"count": 1,
"self": 4.759873728999992
},
"TrainerController.advance": {
"total": 2544.44280867502,
"count": 231821,
"self": 4.766091861004043,
"children": {
"env_step": {
"total": 1995.7312761529683,
"count": 231821,
"self": 1677.6927263171897,
"children": {
"SubprocessEnvManager._take_step": {
"total": 315.063570464903,
"count": 231821,
"self": 17.950349691945803,
"children": {
"TorchPolicy.evaluate": {
"total": 297.1132207729572,
"count": 222908,
"self": 297.1132207729572
}
}
},
"workers": {
"total": 2.9749793708756442,
"count": 231821,
"self": 0.0,
"children": {
"worker_root": {
"total": 2545.8384939378293,
"count": 231821,
"is_parallel": true,
"self": 1175.4170127367363,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0009515379999811557,
"count": 1,
"is_parallel": true,
"self": 0.0002453560000503785,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0007061819999307772,
"count": 2,
"is_parallel": true,
"self": 0.0007061819999307772
}
}
},
"UnityEnvironment.step": {
"total": 0.031900004000135596,
"count": 1,
"is_parallel": true,
"self": 0.00033283600009781367,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00022205400000530062,
"count": 1,
"is_parallel": true,
"self": 0.00022205400000530062
},
"communicator.exchange": {
"total": 0.030581122000057803,
"count": 1,
"is_parallel": true,
"self": 0.030581122000057803
},
"steps_from_proto": {
"total": 0.000763991999974678,
"count": 1,
"is_parallel": true,
"self": 0.00023979200000212586,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005241999999725522,
"count": 2,
"is_parallel": true,
"self": 0.0005241999999725522
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1370.421481201093,
"count": 231820,
"is_parallel": true,
"self": 41.0765366020687,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 86.12698268896202,
"count": 231820,
"is_parallel": true,
"self": 86.12698268896202
},
"communicator.exchange": {
"total": 1143.0675871900098,
"count": 231820,
"is_parallel": true,
"self": 1143.0675871900098
},
"steps_from_proto": {
"total": 100.15037472005247,
"count": 231820,
"is_parallel": true,
"self": 37.70770103702512,
"children": {
"_process_rank_one_or_two_observation": {
"total": 62.44267368302735,
"count": 463640,
"is_parallel": true,
"self": 62.44267368302735
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 543.9454406610475,
"count": 231821,
"self": 7.190314162965024,
"children": {
"process_trajectory": {
"total": 144.1531565300852,
"count": 231821,
"self": 142.72587819008504,
"children": {
"RLTrainer._checkpoint": {
"total": 1.4272783400001572,
"count": 10,
"self": 1.4272783400001572
}
}
},
"_update_policy": {
"total": 392.6019699679973,
"count": 97,
"self": 331.26146718497444,
"children": {
"TorchPPOOptimizer.update": {
"total": 61.340502783022885,
"count": 2910,
"self": 61.340502783022885
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.5010000424808823e-06,
"count": 1,
"self": 1.5010000424808823e-06
},
"TrainerController._save_models": {
"total": 0.19881314900067082,
"count": 1,
"self": 0.0028690859999187523,
"children": {
"RLTrainer._checkpoint": {
"total": 0.19594406300075207,
"count": 1,
"self": 0.19594406300075207
}
}
}
}
}
}
}
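
The file above is the ML-Agents timer dump for this run: "gauges" holds per-metric value/min/max/count summaries, "metadata" records the environment of the run, and the nested "children" entries form a wall-clock timer tree (total seconds, call count, and self time per node). The following is a minimal sketch, not part of the run, showing one way to load and inspect it; it assumes the file sits at run_logs/timers.json relative to the repository root.

import json

# Load the timer dump (path is an assumption; adjust to where the file lives).
with open("run_logs/timers.json") as f:
    timers = json.load(f)

# Gauges: per-metric value/min/max/count collected over the run.
for name, gauge in timers["gauges"].items():
    print(f"{name}: value={gauge['value']:.4g} "
          f"(min={gauge['min']:.4g}, max={gauge['max']:.4g}, count={gauge['count']})")

# Timer tree: each node reports total wall-clock seconds, call count, and
# self time (time not attributed to any child timer). The root node itself
# carries "total", "count", "self", and "children", so we can start there.
def walk(node, name="root", depth=0):
    print(f"{'  ' * depth}{name}: total={node['total']:.2f}s, "
          f"count={node['count']}, self={node['self']:.2f}s")
    for child_name, child in node.get("children", {}).items():
        walk(child, child_name, depth + 1)

walk(timers)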