ppo-Huggy/run_logs/timers.json
{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4045751094818115,
"min": 1.4045751094818115,
"max": 1.4247618913650513,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 70692.265625,
"min": 68215.84375,
"max": 77782.3046875,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 77.52099533437014,
"min": 71.27062228654124,
"max": 420.09166666666664,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49846.0,
"min": 48967.0,
"max": 50411.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999755.0,
"min": 49849.0,
"max": 1999755.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999755.0,
"min": 49849.0,
"max": 1999755.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.4866421222686768,
"min": 0.04835844412446022,
"max": 2.5438668727874756,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1598.910888671875,
"min": 5.754654884338379,
"max": 1735.498291015625,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.8785276085740854,
"min": 1.815664401575297,
"max": 4.012641269004597,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2493.893252313137,
"min": 216.06406378746033,
"max": 2698.1990717053413,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.8785276085740854,
"min": 1.815664401575297,
"max": 4.012641269004597,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2493.893252313137,
"min": 216.06406378746033,
"max": 2698.1990717053413,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.017703914142071477,
"min": 0.013329740625663868,
"max": 0.020829139723718984,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.05311174242621443,
"min": 0.027325222584962224,
"max": 0.058402147528249765,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.0564460139721632,
"min": 0.02213837237407764,
"max": 0.06537524033337833,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.1693380419164896,
"min": 0.04427674474815528,
"max": 0.18938991961379847,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.6620487793499926e-06,
"min": 3.6620487793499926e-06,
"max": 0.00029536567654477497,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.0986146338049977e-05,
"min": 1.0986146338049977e-05,
"max": 0.0008440035186654999,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10122065000000001,
"min": 0.10122065000000001,
"max": 0.198455225,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30366195,
"min": 0.20756545000000004,
"max": 0.5813345,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 7.091043499999986e-05,
"min": 7.091043499999986e-05,
"max": 0.004922915727500001,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00021273130499999956,
"min": 0.00021273130499999956,
"max": 0.014068591550000002,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1677515405",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.22.4",
"end_time_seconds": "1677517784"
},
"total": 2378.59438378,
"count": 1,
"self": 0.442057976000342,
"children": {
"run_training.setup": {
"total": 0.11524298300003011,
"count": 1,
"self": 0.11524298300003011
},
"TrainerController.start_learning": {
"total": 2378.037082821,
"count": 1,
"self": 4.250811005933883,
"children": {
"TrainerController._reset_env": {
"total": 10.37874621100002,
"count": 1,
"self": 10.37874621100002
},
"TrainerController.advance": {
"total": 2363.299461887066,
"count": 233266,
"self": 4.794737783171513,
"children": {
"env_step": {
"total": 1833.3239982249715,
"count": 233266,
"self": 1535.632200278991,
"children": {
"SubprocessEnvManager._take_step": {
"total": 294.86584422893895,
"count": 233266,
"self": 15.914651393889358,
"children": {
"TorchPolicy.evaluate": {
"total": 278.9511928350496,
"count": 222944,
"self": 70.0442329961669,
"children": {
"TorchPolicy.sample_actions": {
"total": 208.9069598388827,
"count": 222944,
"self": 208.9069598388827
}
}
}
}
},
"workers": {
"total": 2.8259537170416706,
"count": 233266,
"self": 0.0,
"children": {
"worker_root": {
"total": 2369.2741656740427,
"count": 233266,
"is_parallel": true,
"self": 1122.781660228122,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0009367059999476623,
"count": 1,
"is_parallel": true,
"self": 0.00034282199999324803,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005938839999544143,
"count": 2,
"is_parallel": true,
"self": 0.0005938839999544143
}
}
},
"UnityEnvironment.step": {
"total": 0.030082133999940197,
"count": 1,
"is_parallel": true,
"self": 0.00033727899995028565,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00039089500000955013,
"count": 1,
"is_parallel": true,
"self": 0.00039089500000955013
},
"communicator.exchange": {
"total": 0.028607319999991887,
"count": 1,
"is_parallel": true,
"self": 0.028607319999991887
},
"steps_from_proto": {
"total": 0.0007466399999884743,
"count": 1,
"is_parallel": true,
"self": 0.0002613249999967593,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.000485314999991715,
"count": 2,
"is_parallel": true,
"self": 0.000485314999991715
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1246.4925054459206,
"count": 233265,
"is_parallel": true,
"self": 38.10691009899324,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 77.39065260292023,
"count": 233265,
"is_parallel": true,
"self": 77.39065260292023
},
"communicator.exchange": {
"total": 1039.077584517989,
"count": 233265,
"is_parallel": true,
"self": 1039.077584517989
},
"steps_from_proto": {
"total": 91.91735822601811,
"count": 233265,
"is_parallel": true,
"self": 37.14079273279651,
"children": {
"_process_rank_one_or_two_observation": {
"total": 54.7765654932216,
"count": 466530,
"is_parallel": true,
"self": 54.7765654932216
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 525.180725878923,
"count": 233266,
"self": 6.547001170969224,
"children": {
"process_trajectory": {
"total": 169.07796106995465,
"count": 233266,
"self": 167.70282611995435,
"children": {
"RLTrainer._checkpoint": {
"total": 1.3751349500003016,
"count": 10,
"self": 1.3751349500003016
}
}
},
"_update_policy": {
"total": 349.5557636379991,
"count": 97,
"self": 292.30064266899956,
"children": {
"TorchPPOOptimizer.update": {
"total": 57.25512096899956,
"count": 2910,
"self": 57.25512096899956
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.000000318337698e-07,
"count": 1,
"self": 9.000000318337698e-07
},
"TrainerController._save_models": {
"total": 0.10806281699979081,
"count": 1,
"self": 0.0020595269998011645,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10600328999998965,
"count": 1,
"self": 0.10600328999998965
}
}
}
}
}
}
}
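The "gauges" block above stores, for each training statistic, the last recorded value together with its minimum, maximum, and sample count, while the nested "children" entries break the run's wall-clock time down by call site. A minimal sketch for inspecting the file (assuming it is saved locally as run_logs/timers.json, matching the path at the top; the keys are taken from the dump above, the script itself is only illustrative):

import json

# Load the ML-Agents timer dump shown above.
with open("run_logs/timers.json") as f:
    timers = json.load(f)

# Each gauge records the last value plus its min/max and sample count.
for name, gauge in timers["gauges"].items():
    print(f"{name}: value={gauge['value']:g} min={gauge['min']:g} "
          f"max={gauge['max']:g} (count={gauge['count']})")

# The timer tree is nested under "children"; total wall-clock seconds for the
# whole run sit at the root node.
print("total seconds:", timers["total"])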