ppo-Huggy / run_logs / timers.json
{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4051772356033325,
"min": 1.4051772356033325,
"max": 1.4273061752319336,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 69470.5546875,
"min": 69470.5546875,
"max": 76005.9453125,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 94.20190476190476,
"min": 82.435,
"max": 386.6046511627907,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49456.0,
"min": 48783.0,
"max": 50276.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999887.0,
"min": 49642.0,
"max": 1999887.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999887.0,
"min": 49642.0,
"max": 1999887.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.388550043106079,
"min": 0.01902098022401333,
"max": 2.469984769821167,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1253.98876953125,
"min": 2.434685468673706,
"max": 1441.755859375,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.6827304706119355,
"min": 1.639048013894353,
"max": 3.9253491645913505,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1933.4334970712662,
"min": 209.7981457784772,
"max": 2316.7945279479027,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.6827304706119355,
"min": 1.639048013894353,
"max": 3.9253491645913505,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1933.4334970712662,
"min": 209.7981457784772,
"max": 2316.7945279479027,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.0178236319833862,
"min": 0.014724919642806828,
"max": 0.019780695398609775,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.053470895950158595,
"min": 0.029449839285613656,
"max": 0.05637154244856599,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.052229482059677444,
"min": 0.022948492877185346,
"max": 0.05878663441787163,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.15668844617903233,
"min": 0.04589698575437069,
"max": 0.16798016813894112,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.399998866699999e-06,
"min": 3.399998866699999e-06,
"max": 0.00029527560157479996,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.0199996600099997e-05,
"min": 1.0199996600099997e-05,
"max": 0.00084388276870575,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10113329999999998,
"min": 0.10113329999999998,
"max": 0.19842520000000002,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30339989999999994,
"min": 0.20740379999999997,
"max": 0.5812942500000002,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.655167e-05,
"min": 6.655167e-05,
"max": 0.004921417480000002,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00019965501,
"min": 0.00019965501,
"max": 0.014066583075000003,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1712997424",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy2 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1712999833"
},
"total": 2408.567401628,
"count": 1,
"self": 0.8349859380000453,
"children": {
"run_training.setup": {
"total": 0.053770941999971456,
"count": 1,
"self": 0.053770941999971456
},
"TrainerController.start_learning": {
"total": 2407.678644748,
"count": 1,
"self": 3.984485574986593,
"children": {
"TrainerController._reset_env": {
"total": 2.8831173119999676,
"count": 1,
"self": 2.8831173119999676
},
"TrainerController.advance": {
"total": 2400.6388201570135,
"count": 232365,
"self": 4.386312885009829,
"children": {
"env_step": {
"total": 1890.4695821809448,
"count": 232365,
"self": 1571.624090146022,
"children": {
"SubprocessEnvManager._take_step": {
"total": 316.15528596295036,
"count": 232365,
"self": 16.879276630911193,
"children": {
"TorchPolicy.evaluate": {
"total": 299.27600933203917,
"count": 222966,
"self": 299.27600933203917
}
}
},
"workers": {
"total": 2.690206071972625,
"count": 232365,
"self": 0.0,
"children": {
"worker_root": {
"total": 2400.3237156799923,
"count": 232365,
"is_parallel": true,
"self": 1127.1843809200432,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0009865769999919394,
"count": 1,
"is_parallel": true,
"self": 0.00027182500002709276,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0007147519999648466,
"count": 2,
"is_parallel": true,
"self": 0.0007147519999648466
}
}
},
"UnityEnvironment.step": {
"total": 0.030388716999993903,
"count": 1,
"is_parallel": true,
"self": 0.0004139840000334516,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0002520119999758208,
"count": 1,
"is_parallel": true,
"self": 0.0002520119999758208
},
"communicator.exchange": {
"total": 0.02894879799998762,
"count": 1,
"is_parallel": true,
"self": 0.02894879799998762
},
"steps_from_proto": {
"total": 0.0007739229999970121,
"count": 1,
"is_parallel": true,
"self": 0.00020926199999848905,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.000564660999998523,
"count": 2,
"is_parallel": true,
"self": 0.000564660999998523
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1273.1393347599492,
"count": 232364,
"is_parallel": true,
"self": 39.46735792204595,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 84.65073569790167,
"count": 232364,
"is_parallel": true,
"self": 84.65073569790167
},
"communicator.exchange": {
"total": 1056.475654065034,
"count": 232364,
"is_parallel": true,
"self": 1056.475654065034
},
"steps_from_proto": {
"total": 92.54558707496761,
"count": 232364,
"is_parallel": true,
"self": 34.79510471100758,
"children": {
"_process_rank_one_or_two_observation": {
"total": 57.75048236396003,
"count": 464728,
"is_parallel": true,
"self": 57.75048236396003
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 505.78292509105876,
"count": 232365,
"self": 6.255313765022095,
"children": {
"process_trajectory": {
"total": 149.32577701903625,
"count": 232365,
"self": 147.97124159903586,
"children": {
"RLTrainer._checkpoint": {
"total": 1.3545354200003885,
"count": 10,
"self": 1.3545354200003885
}
}
},
"_update_policy": {
"total": 350.2018343070004,
"count": 97,
"self": 286.5755058620018,
"children": {
"TorchPPOOptimizer.update": {
"total": 63.62632844499859,
"count": 2910,
"self": 63.62632844499859
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.5620003068761434e-06,
"count": 1,
"self": 1.5620003068761434e-06
},
"TrainerController._save_models": {
"total": 0.1722201419997873,
"count": 1,
"self": 0.0028359289999571047,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1693842129998302,
"count": 1,
"self": 0.1693842129998302
}
}
}
}
}
}
}