ppo-Huggy / run_logs / timers.json
{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4048748016357422,
"min": 1.4048748016357422,
"max": 1.4285917282104492,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 70122.921875,
"min": 68453.859375,
"max": 77351.109375,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 92.51214953271028,
"min": 88.76840215439856,
"max": 401.096,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49494.0,
"min": 48919.0,
"max": 50261.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999962.0,
"min": 49961.0,
"max": 1999962.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999962.0,
"min": 49961.0,
"max": 1999962.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.402937650680542,
"min": 0.09841720759868622,
"max": 2.425278663635254,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1285.5716552734375,
"min": 12.203733444213867,
"max": 1324.6298828125,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.7886037993653914,
"min": 1.744477953401304,
"max": 3.834019125187805,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2026.9030326604843,
"min": 216.3152662217617,
"max": 2066.7210001945496,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.7886037993653914,
"min": 1.744477953401304,
"max": 3.834019125187805,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2026.9030326604843,
"min": 216.3152662217617,
"max": 2066.7210001945496,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.018112868808546206,
"min": 0.014454196052975023,
"max": 0.021826755027116937,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.05433860642563862,
"min": 0.028908392105950045,
"max": 0.061464128200896084,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.0500173950360881,
"min": 0.020752676762640476,
"max": 0.06509349184731641,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.1500521851082643,
"min": 0.04150535352528095,
"max": 0.183524793262283,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.2441989186333264e-06,
"min": 3.2441989186333264e-06,
"max": 0.00029537805154065,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 9.73259675589998e-06,
"min": 9.73259675589998e-06,
"max": 0.0008441529186156999,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10108136666666667,
"min": 0.10108136666666667,
"max": 0.19845935000000003,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.3032441,
"min": 0.20730339999999997,
"max": 0.5813843,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.396019666666656e-05,
"min": 6.396019666666656e-05,
"max": 0.004923121564999999,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.0001918805899999997,
"min": 0.0001918805899999997,
"max": 0.01407107657,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1690978440",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics --resume",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1690981136"
},
"total": 2695.667480306,
"count": 1,
"self": 0.73890307100055,
"children": {
"run_training.setup": {
"total": 0.04288926799995352,
"count": 1,
"self": 0.04288926799995352
},
"TrainerController.start_learning": {
"total": 2694.8856879669997,
"count": 1,
"self": 5.56105356101898,
"children": {
"TrainerController._reset_env": {
"total": 4.764052322999987,
"count": 1,
"self": 4.764052322999987
},
"TrainerController.advance": {
"total": 2684.3682525519807,
"count": 231390,
"self": 5.476320906998808,
"children": {
"env_step": {
"total": 2100.1218580460636,
"count": 231390,
"self": 1769.5386136128907,
"children": {
"SubprocessEnvManager._take_step": {
"total": 327.2579144320333,
"count": 231390,
"self": 18.952683564089966,
"children": {
"TorchPolicy.evaluate": {
"total": 308.30523086794335,
"count": 222842,
"self": 308.30523086794335
}
}
},
"workers": {
"total": 3.325330001139605,
"count": 231390,
"self": 0.0,
"children": {
"worker_root": {
"total": 2686.06568202603,
"count": 231390,
"is_parallel": true,
"self": 1247.9706513880055,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0008663360000582543,
"count": 1,
"is_parallel": true,
"self": 0.0002918400000453403,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.000574496000012914,
"count": 2,
"is_parallel": true,
"self": 0.000574496000012914
}
}
},
"UnityEnvironment.step": {
"total": 0.05218949300001441,
"count": 1,
"is_parallel": true,
"self": 0.00032427499991172226,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00023820600006274617,
"count": 1,
"is_parallel": true,
"self": 0.00023820600006274617
},
"communicator.exchange": {
"total": 0.05079354700001204,
"count": 1,
"is_parallel": true,
"self": 0.05079354700001204
},
"steps_from_proto": {
"total": 0.0008334650000278998,
"count": 1,
"is_parallel": true,
"self": 0.00025815300000431307,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005753120000235867,
"count": 2,
"is_parallel": true,
"self": 0.0005753120000235867
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1438.0950306380246,
"count": 231389,
"is_parallel": true,
"self": 42.3590516801828,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 90.43069774994729,
"count": 231389,
"is_parallel": true,
"self": 90.43069774994729
},
"communicator.exchange": {
"total": 1197.32663150398,
"count": 231389,
"is_parallel": true,
"self": 1197.32663150398
},
"steps_from_proto": {
"total": 107.97864970391436,
"count": 231389,
"is_parallel": true,
"self": 40.2490534300598,
"children": {
"_process_rank_one_or_two_observation": {
"total": 67.72959627385455,
"count": 462778,
"is_parallel": true,
"self": 67.72959627385455
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 578.7700735989184,
"count": 231390,
"self": 8.10818218082386,
"children": {
"process_trajectory": {
"total": 149.7508737420934,
"count": 231390,
"self": 148.31014104309338,
"children": {
"RLTrainer._checkpoint": {
"total": 1.4407326990000229,
"count": 10,
"self": 1.4407326990000229
}
}
},
"_update_policy": {
"total": 420.91101767600117,
"count": 97,
"self": 359.2660408809894,
"children": {
"TorchPPOOptimizer.update": {
"total": 61.64497679501176,
"count": 2910,
"self": 61.64497679501176
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.4749998626939487e-06,
"count": 1,
"self": 1.4749998626939487e-06
},
"TrainerController._save_models": {
"total": 0.19232805599995118,
"count": 1,
"self": 0.004391084999952,
"children": {
"RLTrainer._checkpoint": {
"total": 0.18793697099999918,
"count": 1,
"self": 0.18793697099999918
}
}
}
}
}
}
}
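
The file above follows the ML-Agents timer log layout: a "gauges" map of per-metric statistics (value/min/max/count) plus a nested timer tree whose nodes carry total wall-clock seconds, a call count, self time, and children. Below is a minimal sketch of how this structure could be read back for inspection; the file path run_logs/timers.json and the helper name walk_timers are illustrative assumptions, not part of the ML-Agents API.

import json

def walk_timers(name, node, depth=0):
    # Recursively print each timer node's total seconds and call count,
    # indenting by nesting depth to mirror the "children" hierarchy.
    total = node.get("total", 0.0)
    count = node.get("count", 0)
    print(f"{'  ' * depth}{name}: {total:.3f}s over {count} call(s)")
    for child_name, child in node.get("children", {}).items():
        walk_timers(child_name, child, depth + 1)

# Assumed location of the log shown above.
with open("run_logs/timers.json") as f:
    data = json.load(f)

# Gauges hold run-level statistics, e.g. Huggy.Environment.CumulativeReward.mean.
for gauge_name, stats in data["gauges"].items():
    print(f"{gauge_name}: value={stats['value']:.4f} "
          f"(min={stats['min']:.4f}, max={stats['max']:.4f}, count={stats['count']})")

# The root object itself is the top of the timer tree ("name": "root").
walk_timers(data.get("name", "root"), data)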