ppo-Huggy/run_logs/timers.json
{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.405578851699829,
"min": 1.405578851699829,
"max": 1.42765474319458,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 70224.125,
"min": 68089.46875,
"max": 76590.6796875,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 88.58676207513417,
"min": 87.17813051146385,
"max": 376.32835820895525,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49520.0,
"min": 49021.0,
"max": 50428.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999974.0,
"min": 49861.0,
"max": 1999974.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999974.0,
"min": 49861.0,
"max": 1999974.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.382909059524536,
"min": 0.08405806869268417,
"max": 2.4139328002929688,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1332.046142578125,
"min": 11.179722785949707,
"max": 1332.490966796875,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.6942611291924616,
"min": 1.864387382466094,
"max": 3.9058287905043914,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2065.091971218586,
"min": 247.9635218679905,
"max": 2105.9693947434425,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.6942611291924616,
"min": 1.864387382466094,
"max": 3.9058287905043914,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2065.091971218586,
"min": 247.9635218679905,
"max": 2105.9693947434425,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.016605567320948467,
"min": 0.011083736743118303,
"max": 0.020841702306643128,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.0498167019628454,
"min": 0.022167473486236607,
"max": 0.05762142878168865,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05212080391744773,
"min": 0.023603162914514544,
"max": 0.06528775487095118,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.1563624117523432,
"min": 0.04720632582902909,
"max": 0.18773240422209103,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.0978489674166666e-06,
"min": 3.0978489674166666e-06,
"max": 0.000295310476563175,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 9.29354690225e-06,
"min": 9.29354690225e-06,
"max": 0.0008440152186615997,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10103258333333336,
"min": 0.10103258333333336,
"max": 0.19843682499999998,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30309775000000005,
"min": 0.20724864999999998,
"max": 0.5813384000000003,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.152590833333332e-05,
"min": 6.152590833333332e-05,
"max": 0.004921997567499999,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00018457772499999995,
"min": 0.00018457772499999995,
"max": 0.014068786160000001,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1717880789",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics --force",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.3.0+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1717883179"
},
"total": 2389.7834278480004,
"count": 1,
"self": 0.4760830090008312,
"children": {
"run_training.setup": {
"total": 0.05962850299988531,
"count": 1,
"self": 0.05962850299988531
},
"TrainerController.start_learning": {
"total": 2389.247716336,
"count": 1,
"self": 4.58225949800908,
"children": {
"TrainerController._reset_env": {
"total": 2.1811601100000644,
"count": 1,
"self": 2.1811601100000644
},
"TrainerController.advance": {
"total": 2382.368540422991,
"count": 231725,
"self": 4.536303358121586,
"children": {
"env_step": {
"total": 1891.2274800859436,
"count": 231725,
"self": 1564.0824297349025,
"children": {
"SubprocessEnvManager._take_step": {
"total": 324.39430587810125,
"count": 231725,
"self": 18.21315752712917,
"children": {
"TorchPolicy.evaluate": {
"total": 306.1811483509721,
"count": 222870,
"self": 306.1811483509721
}
}
},
"workers": {
"total": 2.750744472939914,
"count": 231725,
"self": 0.0,
"children": {
"worker_root": {
"total": 2382.287323323077,
"count": 231725,
"is_parallel": true,
"self": 1120.7946144630532,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0007594260000587383,
"count": 1,
"is_parallel": true,
"self": 0.00018563699995866045,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005737890001000778,
"count": 2,
"is_parallel": true,
"self": 0.0005737890001000778
}
}
},
"UnityEnvironment.step": {
"total": 0.029915981000158354,
"count": 1,
"is_parallel": true,
"self": 0.00039821700033826346,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0001948499998434272,
"count": 1,
"is_parallel": true,
"self": 0.0001948499998434272
},
"communicator.exchange": {
"total": 0.02855415399994854,
"count": 1,
"is_parallel": true,
"self": 0.02855415399994854
},
"steps_from_proto": {
"total": 0.0007687600000281236,
"count": 1,
"is_parallel": true,
"self": 0.00020259100028852117,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005661689997396024,
"count": 2,
"is_parallel": true,
"self": 0.0005661689997396024
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1261.4927088600239,
"count": 231724,
"is_parallel": true,
"self": 38.7478468568936,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 80.83702966303508,
"count": 231724,
"is_parallel": true,
"self": 80.83702966303508
},
"communicator.exchange": {
"total": 1054.0739899361354,
"count": 231724,
"is_parallel": true,
"self": 1054.0739899361354
},
"steps_from_proto": {
"total": 87.8338424039598,
"count": 231724,
"is_parallel": true,
"self": 30.986908631077085,
"children": {
"_process_rank_one_or_two_observation": {
"total": 56.84693377288272,
"count": 463448,
"is_parallel": true,
"self": 56.84693377288272
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 486.6047569789257,
"count": 231725,
"self": 6.6584360090234895,
"children": {
"process_trajectory": {
"total": 150.7991070469052,
"count": 231725,
"self": 149.59344616890485,
"children": {
"RLTrainer._checkpoint": {
"total": 1.2056608780003444,
"count": 10,
"self": 1.2056608780003444
}
}
},
"_update_policy": {
"total": 329.147213922997,
"count": 97,
"self": 265.62121607000336,
"children": {
"TorchPPOOptimizer.update": {
"total": 63.52599785299367,
"count": 2910,
"self": 63.52599785299367
}
}
}
}
}
}
},
"trainer_threads": {
"total": 7.799999366397969e-07,
"count": 1,
"self": 7.799999366397969e-07
},
"TrainerController._save_models": {
"total": 0.11575552500016784,
"count": 1,
"self": 0.0029236260002107883,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11283189899995705,
"count": 1,
"self": 0.11283189899995705
}
}
}
}
}
}
}
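A minimal sketch (not part of the logged output) of one way to inspect this file with the Python standard library; the relative path and the printed fields are assumptions based on the structure shown above.

import json

# Load the gauge/timer report written by mlagents-learn
# (path assumed relative to the repository root).
with open("run_logs/timers.json") as f:
    timers = json.load(f)

# Final, min and max value of each training gauge (entropy, rewards, losses, ...).
for name, gauge in timers["gauges"].items():
    print(f"{name}: {gauge['value']} (min {gauge['min']}, max {gauge['max']}, count {gauge['count']})")

# Walk the hierarchical timer tree and show where wall-clock time was spent.
def report(name, node, depth=0):
    print(f"{'  ' * depth}{name}: {node.get('total', 0.0):.2f}s over {node.get('count', 0)} call(s)")
    for child_name, child in node.get("children", {}).items():
        report(child_name, child, depth + 1)

report(timers.get("name", "root"), timers)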