ppo-Huggy / run_logs / timers.json
{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4082492589950562,
"min": 1.4082492589950562,
"max": 1.4261722564697266,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 70933.515625,
"min": 68573.9609375,
"max": 78944.2734375,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 78.96031746031746,
"min": 68.78351955307262,
"max": 388.2692307692308,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49745.0,
"min": 48838.0,
"max": 50475.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999715.0,
"min": 49855.0,
"max": 1999715.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999715.0,
"min": 49855.0,
"max": 1999715.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.4907114505767822,
"min": 0.0657123327255249,
"max": 2.5414974689483643,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1569.148193359375,
"min": 8.476890563964844,
"max": 1759.378173828125,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.8400393267472586,
"min": 1.8014928010082984,
"max": 4.0708517725073445,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2419.224775850773,
"min": 232.3925713300705,
"max": 2799.3455988168716,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.8400393267472586,
"min": 1.8014928010082984,
"max": 4.0708517725073445,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2419.224775850773,
"min": 232.3925713300705,
"max": 2799.3455988168716,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.01503198719648127,
"min": 0.013607901184877846,
"max": 0.019357518792094196,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.04509596158944381,
"min": 0.027215802369755692,
"max": 0.05545685913530179,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05877684739728769,
"min": 0.022502900287508964,
"max": 0.06187217806776365,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.17633054219186306,
"min": 0.04500580057501793,
"max": 0.18561653420329094,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.7926987358000005e-06,
"min": 3.7926987358000005e-06,
"max": 0.00029534587655137497,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.1378096207400001e-05,
"min": 1.1378096207400001e-05,
"max": 0.0008439844686718499,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10126420000000003,
"min": 0.10126420000000003,
"max": 0.19844862499999993,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.3037926000000001,
"min": 0.20767495000000002,
"max": 0.58132815,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 7.308358000000002e-05,
"min": 7.308358000000002e-05,
"max": 0.0049225863875,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00021925074000000007,
"min": 0.00021925074000000007,
"max": 0.014068274684999998,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1696839226",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.0.1+cu118",
"numpy_version": "1.21.2",
"end_time_seconds": "1696841933"
},
"total": 2706.5561718299996,
"count": 1,
"self": 0.8310734949996004,
"children": {
"run_training.setup": {
"total": 0.0505143020000105,
"count": 1,
"self": 0.0505143020000105
},
"TrainerController.start_learning": {
"total": 2705.674584033,
"count": 1,
"self": 4.918478860932282,
"children": {
"TrainerController._reset_env": {
"total": 7.987078443000087,
"count": 1,
"self": 7.987078443000087
},
"TrainerController.advance": {
"total": 2692.6101145430675,
"count": 233728,
"self": 5.170828087029804,
"children": {
"env_step": {
"total": 2070.0126661120826,
"count": 233728,
"self": 1712.47303375702,
"children": {
"SubprocessEnvManager._take_step": {
"total": 354.31803769701037,
"count": 233728,
"self": 17.940462234033134,
"children": {
"TorchPolicy.evaluate": {
"total": 336.37757546297723,
"count": 223012,
"self": 336.37757546297723
}
}
},
"workers": {
"total": 3.221594658052254,
"count": 233728,
"self": 0.0,
"children": {
"worker_root": {
"total": 2697.3612148970087,
"count": 233728,
"is_parallel": true,
"self": 1304.671650688022,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0011792159999686191,
"count": 1,
"is_parallel": true,
"self": 0.0003858889999719395,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0007933269999966797,
"count": 2,
"is_parallel": true,
"self": 0.0007933269999966797
}
}
},
"UnityEnvironment.step": {
"total": 0.029454385999997612,
"count": 1,
"is_parallel": true,
"self": 0.00035923599989473587,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00023527500002273882,
"count": 1,
"is_parallel": true,
"self": 0.00023527500002273882
},
"communicator.exchange": {
"total": 0.028015600999992785,
"count": 1,
"is_parallel": true,
"self": 0.028015600999992785
},
"steps_from_proto": {
"total": 0.0008442740000873528,
"count": 1,
"is_parallel": true,
"self": 0.00026854800000819523,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005757260000791575,
"count": 2,
"is_parallel": true,
"self": 0.0005757260000791575
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1392.6895642089867,
"count": 233727,
"is_parallel": true,
"self": 42.07368748934368,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 93.59794064982304,
"count": 233727,
"is_parallel": true,
"self": 93.59794064982304
},
"communicator.exchange": {
"total": 1148.8927181779077,
"count": 233727,
"is_parallel": true,
"self": 1148.8927181779077
},
"steps_from_proto": {
"total": 108.12521789191203,
"count": 233727,
"is_parallel": true,
"self": 41.82488775403999,
"children": {
"_process_rank_one_or_two_observation": {
"total": 66.30033013787204,
"count": 467454,
"is_parallel": true,
"self": 66.30033013787204
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 617.4266203439552,
"count": 233728,
"self": 7.02502114300637,
"children": {
"process_trajectory": {
"total": 170.6003631389483,
"count": 233728,
"self": 169.29696213394823,
"children": {
"RLTrainer._checkpoint": {
"total": 1.3034010050000688,
"count": 10,
"self": 1.3034010050000688
}
}
},
"_update_policy": {
"total": 439.8012360620005,
"count": 97,
"self": 375.77599135099524,
"children": {
"TorchPPOOptimizer.update": {
"total": 64.02524471100526,
"count": 2910,
"self": 64.02524471100526
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.2959999367012642e-06,
"count": 1,
"self": 1.2959999367012642e-06
},
"TrainerController._save_models": {
"total": 0.15891089000024294,
"count": 1,
"self": 0.0029439520003506914,
"children": {
"RLTrainer._checkpoint": {
"total": 0.15596693799989225,
"count": 1,
"self": 0.15596693799989225
}
}
}
}
}
}
}