{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4105886220932007,
"min": 1.4105886220932007,
"max": 1.426993727684021,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 71382.8359375,
"min": 68655.484375,
"max": 77428.765625,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 80.81239804241436,
"min": 77.69059011164275,
"max": 395.3307086614173,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49538.0,
"min": 48712.0,
"max": 50207.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999449.0,
"min": 49584.0,
"max": 1999449.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999449.0,
"min": 49584.0,
"max": 1999449.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.492112636566162,
"min": 0.05112152174115181,
"max": 2.492112636566162,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1527.6650390625,
"min": 6.441311836242676,
"max": 1527.6650390625,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.955545475879657,
"min": 1.7229092064357938,
"max": 3.970777125254921,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2424.7493767142296,
"min": 217.08656001091003,
"max": 2424.7493767142296,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.955545475879657,
"min": 1.7229092064357938,
"max": 3.970777125254921,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2424.7493767142296,
"min": 217.08656001091003,
"max": 2424.7493767142296,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.016450505093477355,
"min": 0.01424388706557996,
"max": 0.02196421070257202,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.049351515280432065,
"min": 0.02913375865124787,
"max": 0.058211932093642345,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.0598575725323624,
"min": 0.022188248236974083,
"max": 0.060420664275685945,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.1795727175970872,
"min": 0.046172330155968666,
"max": 0.1811665921161572,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.5691988103000027e-06,
"min": 3.5691988103000027e-06,
"max": 0.000295321126559625,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.0707596430900009e-05,
"min": 1.0707596430900009e-05,
"max": 0.0008436109687963498,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.1011897,
"min": 0.1011897,
"max": 0.19844037499999995,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.3035691,
"min": 0.20751915,
"max": 0.5812036500000002,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.936603000000006e-05,
"min": 6.936603000000006e-05,
"max": 0.004922174712500001,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00020809809000000017,
"min": 0.00020809809000000017,
"max": 0.014062062135000001,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1679054520",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1679056902"
},
"total": 2382.6530678980002,
"count": 1,
"self": 0.4350194840003496,
"children": {
"run_training.setup": {
"total": 0.10543129600000611,
"count": 1,
"self": 0.10543129600000611
},
"TrainerController.start_learning": {
"total": 2382.112617118,
"count": 1,
"self": 4.214305752023392,
"children": {
"TrainerController._reset_env": {
"total": 9.351871592000009,
"count": 1,
"self": 9.351871592000009
},
"TrainerController.advance": {
"total": 2368.4259863669768,
"count": 232490,
"self": 4.631302410949047,
"children": {
"env_step": {
"total": 1845.8023807020018,
"count": 232490,
"self": 1557.2104407348734,
"children": {
"SubprocessEnvManager._take_step": {
"total": 285.774118716077,
"count": 232490,
"self": 16.774016219082625,
"children": {
"TorchPolicy.evaluate": {
"total": 269.0001024969944,
"count": 222914,
"self": 269.0001024969944
}
}
},
"workers": {
"total": 2.817821251051271,
"count": 232490,
"self": 0.0,
"children": {
"worker_root": {
"total": 2374.0600080650493,
"count": 232490,
"is_parallel": true,
"self": 1108.0127051370919,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.001147125999978016,
"count": 1,
"is_parallel": true,
"self": 0.00026981999997133244,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0008773060000066835,
"count": 2,
"is_parallel": true,
"self": 0.0008773060000066835
}
}
},
"UnityEnvironment.step": {
"total": 0.028846207000015056,
"count": 1,
"is_parallel": true,
"self": 0.0003237190000504597,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0002272159999847645,
"count": 1,
"is_parallel": true,
"self": 0.0002272159999847645
},
"communicator.exchange": {
"total": 0.02759059699997124,
"count": 1,
"is_parallel": true,
"self": 0.02759059699997124
},
"steps_from_proto": {
"total": 0.0007046750000085922,
"count": 1,
"is_parallel": true,
"self": 0.0002107840000462602,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.000493890999962332,
"count": 2,
"is_parallel": true,
"self": 0.000493890999962332
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1266.0473029279574,
"count": 232489,
"is_parallel": true,
"self": 38.73370026398106,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 77.51245763104475,
"count": 232489,
"is_parallel": true,
"self": 77.51245763104475
},
"communicator.exchange": {
"total": 1060.3346700368666,
"count": 232489,
"is_parallel": true,
"self": 1060.3346700368666
},
"steps_from_proto": {
"total": 89.46647499606496,
"count": 232489,
"is_parallel": true,
"self": 33.73431925411347,
"children": {
"_process_rank_one_or_two_observation": {
"total": 55.73215574195149,
"count": 464978,
"is_parallel": true,
"self": 55.73215574195149
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 517.9923032540261,
"count": 232490,
"self": 6.891488336062309,
"children": {
"process_trajectory": {
"total": 146.4997940089646,
"count": 232490,
"self": 145.26721550796412,
"children": {
"RLTrainer._checkpoint": {
"total": 1.2325785010004893,
"count": 10,
"self": 1.2325785010004893
}
}
},
"_update_policy": {
"total": 364.60102090899915,
"count": 97,
"self": 305.94375054999773,
"children": {
"TorchPPOOptimizer.update": {
"total": 58.657270359001416,
"count": 2910,
"self": 58.657270359001416
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.1409997568989638e-06,
"count": 1,
"self": 1.1409997568989638e-06
},
"TrainerController._save_models": {
"total": 0.12045226600002934,
"count": 1,
"self": 0.0022324409997054318,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11821982500032391,
"count": 1,
"self": 0.11821982500032391
}
}
}
}
}
}
}