{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4044647216796875,
"min": 1.4044647216796875,
"max": 1.4264479875564575,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 71777.9765625,
"min": 68396.2109375,
"max": 77573.1875,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 89.17117117117117,
"min": 80.34690553745928,
"max": 400.072,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49490.0,
"min": 48991.0,
"max": 50009.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999927.0,
"min": 49595.0,
"max": 1999927.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999927.0,
"min": 49595.0,
"max": 1999927.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.4819579124450684,
"min": -0.025139067322015762,
"max": 2.4819579124450684,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1377.4866943359375,
"min": -3.117244243621826,
"max": 1508.261474609375,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.8872570130202146,
"min": 1.7772189527750015,
"max": 4.013602175594361,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2157.427642226219,
"min": 220.3751501441002,
"max": 2428.2293162345886,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.8872570130202146,
"min": 1.7772189527750015,
"max": 4.013602175594361,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2157.427642226219,
"min": 220.3751501441002,
"max": 2428.2293162345886,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.016379882119023626,
"min": 0.012211025325162456,
"max": 0.01932797508391862,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.04913964635707088,
"min": 0.024422050650324913,
"max": 0.05517510596255307,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.054827052230636275,
"min": 0.023543307868142925,
"max": 0.0600398037996557,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.16448115669190883,
"min": 0.04708661573628585,
"max": 0.1801194113989671,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.4238988587333313e-06,
"min": 3.4238988587333313e-06,
"max": 0.000295245001585,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.0271696576199994e-05,
"min": 1.0271696576199994e-05,
"max": 0.00084373396875535,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10114126666666667,
"min": 0.10114126666666667,
"max": 0.19841499999999995,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.3034238,
"min": 0.20741069999999995,
"max": 0.5812446499999999,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.694920666666666e-05,
"min": 6.694920666666666e-05,
"max": 0.0049209085000000005,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00020084761999999997,
"min": 0.00020084761999999997,
"max": 0.014064108035000002,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1673286100",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1673288368"
},
"total": 2268.650392639,
"count": 1,
"self": 0.3938574650001101,
"children": {
"run_training.setup": {
"total": 0.10260182000001805,
"count": 1,
"self": 0.10260182000001805
},
"TrainerController.start_learning": {
"total": 2268.153933354,
"count": 1,
"self": 3.983922794924638,
"children": {
"TrainerController._reset_env": {
"total": 9.004482844999984,
"count": 1,
"self": 9.004482844999984
},
"TrainerController.advance": {
"total": 2255.0508760020753,
"count": 232773,
"self": 4.2754742329966575,
"children": {
"env_step": {
"total": 1783.3254087410369,
"count": 232773,
"self": 1494.8660454640642,
"children": {
"SubprocessEnvManager._take_step": {
"total": 285.6883205708808,
"count": 232773,
"self": 14.509860144870458,
"children": {
"TorchPolicy.evaluate": {
"total": 271.17846042601036,
"count": 222982,
"self": 67.92480045709317,
"children": {
"TorchPolicy.sample_actions": {
"total": 203.2536599689172,
"count": 222982,
"self": 203.2536599689172
}
}
}
}
},
"workers": {
"total": 2.771042706091862,
"count": 232773,
"self": 0.0,
"children": {
"worker_root": {
"total": 2260.138345082096,
"count": 232773,
"is_parallel": true,
"self": 1032.4326855221198,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0021349109999846405,
"count": 1,
"is_parallel": true,
"self": 0.00041414600002553925,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0017207649999591013,
"count": 2,
"is_parallel": true,
"self": 0.0017207649999591013
}
}
},
"UnityEnvironment.step": {
"total": 0.03161872199996196,
"count": 1,
"is_parallel": true,
"self": 0.00027266899996902794,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00018640600001162966,
"count": 1,
"is_parallel": true,
"self": 0.00018640600001162966
},
"communicator.exchange": {
"total": 0.03044248699995933,
"count": 1,
"is_parallel": true,
"self": 0.03044248699995933
},
"steps_from_proto": {
"total": 0.0007171600000219769,
"count": 1,
"is_parallel": true,
"self": 0.00024962600002709223,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00046753399999488465,
"count": 2,
"is_parallel": true,
"self": 0.00046753399999488465
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1227.705659559976,
"count": 232772,
"is_parallel": true,
"self": 35.13516575310905,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 79.52296811292706,
"count": 232772,
"is_parallel": true,
"self": 79.52296811292706
},
"communicator.exchange": {
"total": 1012.8141563328979,
"count": 232772,
"is_parallel": true,
"self": 1012.8141563328979
},
"steps_from_proto": {
"total": 100.23336936104204,
"count": 232772,
"is_parallel": true,
"self": 41.40137913894466,
"children": {
"_process_rank_one_or_two_observation": {
"total": 58.831990222097374,
"count": 465544,
"is_parallel": true,
"self": 58.831990222097374
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 467.44999302804183,
"count": 232773,
"self": 6.2260212221528946,
"children": {
"process_trajectory": {
"total": 152.2730942648899,
"count": 232773,
"self": 150.94488209689007,
"children": {
"RLTrainer._checkpoint": {
"total": 1.3282121679998227,
"count": 10,
"self": 1.3282121679998227
}
}
},
"_update_policy": {
"total": 308.95087754099904,
"count": 97,
"self": 255.99913260898506,
"children": {
"TorchPPOOptimizer.update": {
"total": 52.95174493201398,
"count": 2910,
"self": 52.95174493201398
}
}
}
}
}
}
},
"trainer_threads": {
"total": 7.580001692986116e-07,
"count": 1,
"self": 7.580001692986116e-07
},
"TrainerController._save_models": {
"total": 0.11465095399989877,
"count": 1,
"self": 0.00207617800015214,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11257477599974663,
"count": 1,
"self": 0.11257477599974663
}
}
}
}
}
}
}