{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4088674783706665,
"min": 1.4088674783706665,
"max": 1.4279935359954834,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 69192.296875,
"min": 69068.6875,
"max": 76824.53125,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 83.86587436332768,
"min": 80.0713128038898,
"max": 409.23770491803276,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49397.0,
"min": 48702.0,
"max": 50087.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999950.0,
"min": 49513.0,
"max": 1999950.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999950.0,
"min": 49513.0,
"max": 1999950.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.459567070007324,
"min": 0.03758874163031578,
"max": 2.49548602104187,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1448.6849365234375,
"min": 4.5482378005981445,
"max": 1509.520263671875,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.799987737411959,
"min": 1.8564186240277014,
"max": 3.9804354101076895,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2238.1927773356438,
"min": 224.62665350735188,
"max": 2380.3867588043213,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.799987737411959,
"min": 1.8564186240277014,
"max": 3.9804354101076895,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2238.1927773356438,
"min": 224.62665350735188,
"max": 2380.3867588043213,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.018404740241304455,
"min": 0.013477734297824402,
"max": 0.020436626515099005,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.05521422072391336,
"min": 0.026955468595648804,
"max": 0.05783641160232946,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05713541222115357,
"min": 0.022158628236502408,
"max": 0.059890403101841606,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.17140623666346072,
"min": 0.044317256473004815,
"max": 0.17967120930552483,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.624998791699999e-06,
"min": 3.624998791699999e-06,
"max": 0.000295325626558125,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.0874996375099997e-05,
"min": 1.0874996375099997e-05,
"max": 0.0008442795185734997,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10120829999999999,
"min": 0.10120829999999999,
"max": 0.198441875,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.3036249,
"min": 0.207548,
"max": 0.5814265000000001,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 7.029416999999997e-05,
"min": 7.029416999999997e-05,
"max": 0.0049222495625000015,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00021088250999999993,
"min": 0.00021088250999999993,
"max": 0.014073182350000001,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1695960832",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1695963292"
},
"total": 2460.064542413,
"count": 1,
"self": 0.8387234729998454,
"children": {
"run_training.setup": {
"total": 0.04348041199995123,
"count": 1,
"self": 0.04348041199995123
},
"TrainerController.start_learning": {
"total": 2459.182338528,
"count": 1,
"self": 4.472226796042378,
"children": {
"TrainerController._reset_env": {
"total": 4.986559091999993,
"count": 1,
"self": 4.986559091999993
},
"TrainerController.advance": {
"total": 2449.5286738579575,
"count": 232371,
"self": 4.637434655972811,
"children": {
"env_step": {
"total": 1890.6212325550296,
"count": 232371,
"self": 1596.305432397022,
"children": {
"SubprocessEnvManager._take_step": {
"total": 291.39350160607677,
"count": 232371,
"self": 16.739988163197324,
"children": {
"TorchPolicy.evaluate": {
"total": 274.65351344287944,
"count": 222895,
"self": 274.65351344287944
}
}
},
"workers": {
"total": 2.922298551930737,
"count": 232371,
"self": 0.0,
"children": {
"worker_root": {
"total": 2451.5982601319947,
"count": 232371,
"is_parallel": true,
"self": 1146.7363078339818,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0010053760000232614,
"count": 1,
"is_parallel": true,
"self": 0.00024073700001281395,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0007646390000104475,
"count": 2,
"is_parallel": true,
"self": 0.0007646390000104475
}
}
},
"UnityEnvironment.step": {
"total": 0.04992458700002089,
"count": 1,
"is_parallel": true,
"self": 0.00036885800000163727,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00022195000002511733,
"count": 1,
"is_parallel": true,
"self": 0.00022195000002511733
},
"communicator.exchange": {
"total": 0.048585941999988336,
"count": 1,
"is_parallel": true,
"self": 0.048585941999988336
},
"steps_from_proto": {
"total": 0.0007478370000058021,
"count": 1,
"is_parallel": true,
"self": 0.00021044900006472744,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005373879999410747,
"count": 2,
"is_parallel": true,
"self": 0.0005373879999410747
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1304.861952298013,
"count": 232370,
"is_parallel": true,
"self": 40.44236176298659,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 81.70641725308565,
"count": 232370,
"is_parallel": true,
"self": 81.70641725308565
},
"communicator.exchange": {
"total": 1083.6482712169686,
"count": 232370,
"is_parallel": true,
"self": 1083.6482712169686
},
"steps_from_proto": {
"total": 99.0649020649721,
"count": 232370,
"is_parallel": true,
"self": 35.09207019091809,
"children": {
"_process_rank_one_or_two_observation": {
"total": 63.97283187405401,
"count": 464740,
"is_parallel": true,
"self": 63.97283187405401
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 554.2700066469554,
"count": 232371,
"self": 6.693190592900692,
"children": {
"process_trajectory": {
"total": 141.09043841905492,
"count": 232371,
"self": 139.66990979605526,
"children": {
"RLTrainer._checkpoint": {
"total": 1.4205286229996545,
"count": 10,
"self": 1.4205286229996545
}
}
},
"_update_policy": {
"total": 406.4863776349997,
"count": 97,
"self": 346.2861665419935,
"children": {
"TorchPPOOptimizer.update": {
"total": 60.2002110930062,
"count": 2910,
"self": 60.2002110930062
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.4870001905364916e-06,
"count": 1,
"self": 1.4870001905364916e-06
},
"TrainerController._save_models": {
"total": 0.1948772949999693,
"count": 1,
"self": 0.0026594219998514745,
"children": {
"RLTrainer._checkpoint": {
"total": 0.19221787300011783,
"count": 1,
"self": 0.19221787300011783
}
}
}
}
}
}
}