ppo-Huggy/run_logs/timers.json
{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.403193712234497,
"min": 1.403193712234497,
"max": 1.4277969598770142,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 70464.1796875,
"min": 69279.71875,
"max": 77116.9453125,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 73.13880597014925,
"min": 73.13880597014925,
"max": 415.3388429752066,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49003.0,
"min": 48844.0,
"max": 50256.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999923.0,
"min": 49874.0,
"max": 1999923.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999923.0,
"min": 49874.0,
"max": 1999923.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.430312395095825,
"min": 0.03148931637406349,
"max": 2.546799659729004,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1628.309326171875,
"min": 3.7787179946899414,
"max": 1647.0380859375,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.767892457478082,
"min": 1.9411556251347064,
"max": 4.082495505391043,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2524.487946510315,
"min": 232.93867501616478,
"max": 2622.632916867733,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.767892457478082,
"min": 1.9411556251347064,
"max": 4.082495505391043,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2524.487946510315,
"min": 232.93867501616478,
"max": 2622.632916867733,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.017559296784424482,
"min": 0.013505054391377294,
"max": 0.021377499059114295,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.05267789035327344,
"min": 0.027010108782754588,
"max": 0.0559971157150964,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.06092391109300985,
"min": 0.022245165115843214,
"max": 0.06373047779003779,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.18277173327902954,
"min": 0.04449033023168643,
"max": 0.18653658777475357,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.845098718333322e-06,
"min": 3.845098718333322e-06,
"max": 0.0002953656015448,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.1535296154999967e-05,
"min": 1.1535296154999967e-05,
"max": 0.0008442922685692499,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10128166666666666,
"min": 0.10128166666666666,
"max": 0.19845519999999994,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.303845,
"min": 0.20769410000000008,
"max": 0.58143075,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 7.395516666666646e-05,
"min": 7.395516666666646e-05,
"max": 0.004922914479999999,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.0002218654999999994,
"min": 0.0002218654999999994,
"max": 0.014073394425,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1676433571",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1676436076"
},
"total": 2505.020255097,
"count": 1,
"self": 0.802504246999888,
"children": {
"run_training.setup": {
"total": 0.11048113399999693,
"count": 1,
"self": 0.11048113399999693
},
"TrainerController.start_learning": {
"total": 2504.107269716,
"count": 1,
"self": 4.43884508694191,
"children": {
"TrainerController._reset_env": {
"total": 10.011768598999993,
"count": 1,
"self": 10.011768598999993
},
"TrainerController.advance": {
"total": 2489.481030627058,
"count": 232472,
"self": 4.676320903140095,
"children": {
"env_step": {
"total": 1938.125713036951,
"count": 232472,
"self": 1618.895392732923,
"children": {
"SubprocessEnvManager._take_step": {
"total": 316.2884437271034,
"count": 232472,
"self": 17.022120930112465,
"children": {
"TorchPolicy.evaluate": {
"total": 299.26632279699095,
"count": 222914,
"self": 74.90608555796018,
"children": {
"TorchPolicy.sample_actions": {
"total": 224.36023723903077,
"count": 222914,
"self": 224.36023723903077
}
}
}
}
},
"workers": {
"total": 2.941876576924642,
"count": 232472,
"self": 0.0,
"children": {
"worker_root": {
"total": 2495.0343455609996,
"count": 232472,
"is_parallel": true,
"self": 1181.6068310370706,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0065940389999923354,
"count": 1,
"is_parallel": true,
"self": 0.0004407090000313474,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.006153329999960988,
"count": 2,
"is_parallel": true,
"self": 0.006153329999960988
}
}
},
"UnityEnvironment.step": {
"total": 0.030511880999938512,
"count": 1,
"is_parallel": true,
"self": 0.0002834059999941019,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0001978120000103445,
"count": 1,
"is_parallel": true,
"self": 0.0001978120000103445
},
"communicator.exchange": {
"total": 0.02933968199999981,
"count": 1,
"is_parallel": true,
"self": 0.02933968199999981
},
"steps_from_proto": {
"total": 0.0006909809999342542,
"count": 1,
"is_parallel": true,
"self": 0.00022971100008817302,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0004612699998460812,
"count": 2,
"is_parallel": true,
"self": 0.0004612699998460812
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1313.427514523929,
"count": 232471,
"is_parallel": true,
"self": 40.028542142987135,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 80.01862253794411,
"count": 232471,
"is_parallel": true,
"self": 80.01862253794411
},
"communicator.exchange": {
"total": 1092.7154244009566,
"count": 232471,
"is_parallel": true,
"self": 1092.7154244009566
},
"steps_from_proto": {
"total": 100.66492544204095,
"count": 232471,
"is_parallel": true,
"self": 39.19632000202114,
"children": {
"_process_rank_one_or_two_observation": {
"total": 61.468605440019815,
"count": 464942,
"is_parallel": true,
"self": 61.468605440019815
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 546.6789966869666,
"count": 232472,
"self": 7.09622865396409,
"children": {
"process_trajectory": {
"total": 168.56426620400146,
"count": 232472,
"self": 167.20737877700117,
"children": {
"RLTrainer._checkpoint": {
"total": 1.356887427000288,
"count": 10,
"self": 1.356887427000288
}
}
},
"_update_policy": {
"total": 371.018501829001,
"count": 97,
"self": 312.40530537698703,
"children": {
"TorchPPOOptimizer.update": {
"total": 58.613196452013995,
"count": 2910,
"self": 58.613196452013995
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.1639999684120994e-06,
"count": 1,
"self": 1.1639999684120994e-06
},
"TrainerController._save_models": {
"total": 0.17562423900017166,
"count": 1,
"self": 0.002869432999887067,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1727548060002846,
"count": 1,
"self": 0.1727548060002846
}
}
}
}
}
}
}