{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4142838716506958,
"min": 1.4142838716506958,
"max": 1.4311383962631226,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 67973.3125,
"min": 67973.3125,
"max": 76546.4453125,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 119.65291262135922,
"min": 92.20708955223881,
"max": 423.22033898305085,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49297.0,
"min": 49052.0,
"max": 49996.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999963.0,
"min": 49345.0,
"max": 1999963.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999963.0,
"min": 49345.0,
"max": 1999963.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.2681150436401367,
"min": -0.01191757246851921,
"max": 2.416292905807495,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 934.4634399414062,
"min": -1.3943560123443604,
"max": 1263.8406982421875,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.4074410521289678,
"min": 1.8874357549043803,
"max": 3.941947650394322,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1403.8657134771347,
"min": 220.82998332381248,
"max": 2001.6414258480072,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.4074410521289678,
"min": 1.8874357549043803,
"max": 3.941947650394322,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1403.8657134771347,
"min": 220.82998332381248,
"max": 2001.6414258480072,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.018138978214442936,
"min": 0.012592292051946667,
"max": 0.02110663745934289,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.05441693464332881,
"min": 0.025184584103893334,
"max": 0.05750888650363777,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.04310283145556847,
"min": 0.024080050891886154,
"max": 0.062386614291204345,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.1293084943667054,
"min": 0.04816010178377231,
"max": 0.18715984287361304,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.330348889916667e-06,
"min": 3.330348889916667e-06,
"max": 0.00029537025154325,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 9.99104666975e-06,
"min": 9.99104666975e-06,
"max": 0.0008442736685754498,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10111008333333331,
"min": 0.10111008333333331,
"max": 0.19845675000000002,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.3033302499999999,
"min": 0.2073752000000001,
"max": 0.5814245500000002,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.539315833333336e-05,
"min": 6.539315833333336e-05,
"max": 0.004922991825,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00019617947500000007,
"min": 0.00019617947500000007,
"max": 0.014073085045000001,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1671545473",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1671547678"
},
"total": 2205.653186448,
"count": 1,
"self": 0.3918140170003426,
"children": {
"run_training.setup": {
"total": 0.10864998800002468,
"count": 1,
"self": 0.10864998800002468
},
"TrainerController.start_learning": {
"total": 2205.1527224429997,
"count": 1,
"self": 3.6156971369978237,
"children": {
"TrainerController._reset_env": {
"total": 7.897993654999993,
"count": 1,
"self": 7.897993654999993
},
"TrainerController.advance": {
"total": 2193.514154032002,
"count": 231219,
"self": 3.875395496973397,
"children": {
"env_step": {
"total": 1727.274241374051,
"count": 231219,
"self": 1451.7732055572067,
"children": {
"SubprocessEnvManager._take_step": {
"total": 272.88129482491945,
"count": 231219,
"self": 13.936271699903898,
"children": {
"TorchPolicy.evaluate": {
"total": 258.94502312501555,
"count": 222828,
"self": 64.71632552500893,
"children": {
"TorchPolicy.sample_actions": {
"total": 194.22869760000663,
"count": 222828,
"self": 194.22869760000663
}
}
}
}
},
"workers": {
"total": 2.6197409919248003,
"count": 231219,
"self": 0.0,
"children": {
"worker_root": {
"total": 2197.458006906055,
"count": 231219,
"is_parallel": true,
"self": 999.6185749280448,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0020725069999798507,
"count": 1,
"is_parallel": true,
"self": 0.0003032469999766363,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0017692600000032144,
"count": 2,
"is_parallel": true,
"self": 0.0017692600000032144
}
}
},
"UnityEnvironment.step": {
"total": 0.02674530199999481,
"count": 1,
"is_parallel": true,
"self": 0.00028213299998469665,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00017595200000641853,
"count": 1,
"is_parallel": true,
"self": 0.00017595200000641853
},
"communicator.exchange": {
"total": 0.025593737999997757,
"count": 1,
"is_parallel": true,
"self": 0.025593737999997757
},
"steps_from_proto": {
"total": 0.0006934790000059365,
"count": 1,
"is_parallel": true,
"self": 0.00023673699996606956,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.000456742000039867,
"count": 2,
"is_parallel": true,
"self": 0.000456742000039867
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1197.83943197801,
"count": 231218,
"is_parallel": true,
"self": 34.83953874389704,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 77.04219063507526,
"count": 231218,
"is_parallel": true,
"self": 77.04219063507526
},
"communicator.exchange": {
"total": 992.6559531490927,
"count": 231218,
"is_parallel": true,
"self": 992.6559531490927
},
"steps_from_proto": {
"total": 93.30174944994502,
"count": 231218,
"is_parallel": true,
"self": 38.16757621792692,
"children": {
"_process_rank_one_or_two_observation": {
"total": 55.1341732320181,
"count": 462436,
"is_parallel": true,
"self": 55.1341732320181
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 462.36451716097736,
"count": 231219,
"self": 5.640358936823532,
"children": {
"process_trajectory": {
"total": 141.85290994115218,
"count": 231219,
"self": 140.6722137811518,
"children": {
"RLTrainer._checkpoint": {
"total": 1.180696160000366,
"count": 10,
"self": 1.180696160000366
}
}
},
"_update_policy": {
"total": 314.87124828300165,
"count": 97,
"self": 261.84188133701144,
"children": {
"TorchPPOOptimizer.update": {
"total": 53.029366945990205,
"count": 2910,
"self": 53.029366945990205
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.760002856433857e-07,
"count": 1,
"self": 8.760002856433857e-07
},
"TrainerController._save_models": {
"total": 0.1248767429997315,
"count": 1,
"self": 0.0021160649994271807,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12276067800030432,
"count": 1,
"self": 0.12276067800030432
}
}
}
}
}
}
}