ppo-Huggy/run_logs/timers.json
{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4080817699432373,
"min": 1.4080817699432373,
"max": 1.4284272193908691,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 69135.40625,
"min": 69135.40625,
"max": 76588.6953125,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 81.825,
"min": 76.90795631825273,
"max": 387.9612403100775,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49095.0,
"min": 48949.0,
"max": 50047.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999929.0,
"min": 49726.0,
"max": 1999929.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999929.0,
"min": 49726.0,
"max": 1999929.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.4922502040863037,
"min": -0.04601811617612839,
"max": 2.531200885772705,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1495.35009765625,
"min": -5.890318870544434,
"max": 1604.283447265625,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.8384710224469503,
"min": 1.8016590096522123,
"max": 4.069016265568613,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2303.08261346817,
"min": 230.61235323548317,
"max": 2530.333663582802,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.8384710224469503,
"min": 1.8016590096522123,
"max": 4.069016265568613,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2303.08261346817,
"min": 230.61235323548317,
"max": 2530.333663582802,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.015933995858490056,
"min": 0.013357469670881982,
"max": 0.01982119088570471,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.04780198757547017,
"min": 0.026714939341763964,
"max": 0.0575841957819648,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05939134438004758,
"min": 0.02463892937327425,
"max": 0.06555458419024945,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.17817403314014274,
"min": 0.0492778587465485,
"max": 0.1799254688123862,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.865248711616672e-06,
"min": 3.865248711616672e-06,
"max": 0.00029532450155849994,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.1595746134850018e-05,
"min": 1.1595746134850018e-05,
"max": 0.0008441569686143498,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.1012883833333333,
"min": 0.1012883833333333,
"max": 0.19844150000000008,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.3038651499999999,
"min": 0.20771615,
"max": 0.5813856500000001,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 7.429032833333343e-05,
"min": 7.429032833333343e-05,
"max": 0.004922230849999999,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00022287098500000028,
"min": 0.00022287098500000028,
"max": 0.014071143934999999,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1671614892",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1671617081"
},
"total": 2188.920948209,
"count": 1,
"self": 0.4008911689998058,
"children": {
"run_training.setup": {
"total": 0.10783353600004375,
"count": 1,
"self": 0.10783353600004375
},
"TrainerController.start_learning": {
"total": 2188.412223504,
"count": 1,
"self": 3.715478545092992,
"children": {
"TrainerController._reset_env": {
"total": 7.976220884999975,
"count": 1,
"self": 7.976220884999975
},
"TrainerController.advance": {
"total": 2176.605155660907,
"count": 233191,
"self": 4.080405764891566,
"children": {
"env_step": {
"total": 1703.5246587300157,
"count": 233191,
"self": 1436.3459107131198,
"children": {
"SubprocessEnvManager._take_step": {
"total": 264.7098513860948,
"count": 233191,
"self": 13.983400070135303,
"children": {
"TorchPolicy.evaluate": {
"total": 250.72645131595948,
"count": 222966,
"self": 62.71689189699623,
"children": {
"TorchPolicy.sample_actions": {
"total": 188.00955941896325,
"count": 222966,
"self": 188.00955941896325
}
}
}
}
},
"workers": {
"total": 2.468896630801055,
"count": 233191,
"self": 0.0,
"children": {
"worker_root": {
"total": 2180.5615284477735,
"count": 233191,
"is_parallel": true,
"self": 996.6386857247567,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0021719050000683637,
"count": 1,
"is_parallel": true,
"self": 0.00031311600002936757,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0018587890000389962,
"count": 2,
"is_parallel": true,
"self": 0.0018587890000389962
}
}
},
"UnityEnvironment.step": {
"total": 0.026376110999990487,
"count": 1,
"is_parallel": true,
"self": 0.00027928099996188394,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0001773129999946832,
"count": 1,
"is_parallel": true,
"self": 0.0001773129999946832
},
"communicator.exchange": {
"total": 0.025174415000037698,
"count": 1,
"is_parallel": true,
"self": 0.025174415000037698
},
"steps_from_proto": {
"total": 0.0007451019999962227,
"count": 1,
"is_parallel": true,
"self": 0.00023701699990397174,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.000508085000092251,
"count": 2,
"is_parallel": true,
"self": 0.000508085000092251
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1183.9228427230169,
"count": 233190,
"is_parallel": true,
"self": 34.196790766259255,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 74.87435808492728,
"count": 233190,
"is_parallel": true,
"self": 74.87435808492728
},
"communicator.exchange": {
"total": 984.0718693798867,
"count": 233190,
"is_parallel": true,
"self": 984.0718693798867
},
"steps_from_proto": {
"total": 90.7798244919436,
"count": 233190,
"is_parallel": true,
"self": 37.11595759407919,
"children": {
"_process_rank_one_or_two_observation": {
"total": 53.66386689786441,
"count": 466380,
"is_parallel": true,
"self": 53.66386689786441
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 469.0000911659995,
"count": 233191,
"self": 5.897698248082293,
"children": {
"process_trajectory": {
"total": 149.8470864059177,
"count": 233191,
"self": 148.69645839291798,
"children": {
"RLTrainer._checkpoint": {
"total": 1.1506280129997322,
"count": 10,
"self": 1.1506280129997322
}
}
},
"_update_policy": {
"total": 313.2553065119995,
"count": 97,
"self": 260.47667216199204,
"children": {
"TorchPPOOptimizer.update": {
"total": 52.77863435000745,
"count": 2910,
"self": 52.77863435000745
}
}
}
}
}
}
},
"trainer_threads": {
"total": 7.160001587180886e-07,
"count": 1,
"self": 7.160001587180886e-07
},
"TrainerController._save_models": {
"total": 0.11536769699978322,
"count": 1,
"self": 0.0018828279994522745,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11348486900033095,
"count": 1,
"self": 0.11348486900033095
}
}
}
}
}
}
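
For reference, a minimal sketch of how a run log like this could be inspected with plain Python. The file path and the choice of printed fields are assumptions for illustration, not part of the original log; the structure assumed is what ML-Agents writes here: a "gauges" map of value/min/max/count summaries, plus a hierarchical timer tree whose nodes carry total/count/self seconds and optional children.

import json

# Minimal sketch (assumption: the file sits at ./run_logs/timers.json,
# as the repository layout suggests).
with open("run_logs/timers.json") as f:
    timers = json.load(f)

# Gauges: each entry records value / min / max / count for one statistic.
for name, gauge in timers["gauges"].items():
    print(f"{name}: value={gauge['value']:.4f} "
          f"(min={gauge['min']:.4f}, max={gauge['max']:.4f}, n={gauge['count']})")

def walk(name, node, depth=0):
    """Recursively print the timer tree (total seconds and call count)."""
    print(f"{'  ' * depth}{name}: {node['total']:.2f}s over {node['count']} call(s)")
    for child_name, child in node.get("children", {}).items():
        walk(child_name, child, depth + 1)

# The root node itself holds the overall wall-clock total for the run.
walk("root", timers)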