{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4061048030853271,
"min": 1.4061048030853271,
"max": 1.4293618202209473,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 70568.1796875,
"min": 68775.125,
"max": 76349.859375,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 93.30929791271348,
"min": 74.09009009009009,
"max": 420.06666666666666,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49174.0,
"min": 48834.0,
"max": 50408.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999254.0,
"min": 49848.0,
"max": 1999254.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999254.0,
"min": 49848.0,
"max": 1999254.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.5037641525268555,
"min": 0.09084034711122513,
"max": 2.5080296993255615,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1319.4837646484375,
"min": 10.810001373291016,
"max": 1638.906005859375,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.828609245778035,
"min": 1.7253930956876578,
"max": 4.011274139436198,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2017.6770725250244,
"min": 205.32177838683128,
"max": 2579.5684235692024,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.828609245778035,
"min": 1.7253930956876578,
"max": 4.011274139436198,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2017.6770725250244,
"min": 205.32177838683128,
"max": 2579.5684235692024,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.018209458475596168,
"min": 0.014207910032079477,
"max": 0.01969848735607229,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.0546283754267885,
"min": 0.030475717317555488,
"max": 0.0546283754267885,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05006264224648476,
"min": 0.022879877034574746,
"max": 0.06480103911211094,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.15018792673945427,
"min": 0.04575975406914949,
"max": 0.1799713918318351,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.865048711683334e-06,
"min": 3.865048711683334e-06,
"max": 0.0002953483515505499,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.1595146135050002e-05,
"min": 1.1595146135050002e-05,
"max": 0.0008442463685845499,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.1012883166666667,
"min": 0.1012883166666667,
"max": 0.19844945000000003,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.3038649500000001,
"min": 0.20771065000000002,
"max": 0.58141545,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 7.428700166666667e-05,
"min": 7.428700166666667e-05,
"max": 0.0049226275550000006,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.000222861005,
"min": 0.000222861005,
"max": 0.014072630955000002,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1671297417",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1671299540"
},
"total": 2122.563922687,
"count": 1,
"self": 0.45914026599984936,
"children": {
"run_training.setup": {
"total": 0.11663059299996803,
"count": 1,
"self": 0.11663059299996803
},
"TrainerController.start_learning": {
"total": 2121.988151828,
"count": 1,
"self": 3.6285961650455647,
"children": {
"TrainerController._reset_env": {
"total": 6.980869005000045,
"count": 1,
"self": 6.980869005000045
},
"TrainerController.advance": {
"total": 2111.250068276954,
"count": 233170,
"self": 3.9861591721096374,
"children": {
"env_step": {
"total": 1647.5998994589806,
"count": 233170,
"self": 1384.059507550986,
"children": {
"SubprocessEnvManager._take_step": {
"total": 261.13714874897903,
"count": 233170,
"self": 13.604636140889625,
"children": {
"TorchPolicy.evaluate": {
"total": 247.5325126080894,
"count": 223012,
"self": 62.12093045917857,
"children": {
"TorchPolicy.sample_actions": {
"total": 185.41158214891084,
"count": 223012,
"self": 185.41158214891084
}
}
}
}
},
"workers": {
"total": 2.403243159015801,
"count": 233170,
"self": 0.0,
"children": {
"worker_root": {
"total": 2114.490057409938,
"count": 233170,
"is_parallel": true,
"self": 977.0699123200332,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002022893000003023,
"count": 1,
"is_parallel": true,
"self": 0.00032770500001788605,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016951879999851371,
"count": 2,
"is_parallel": true,
"self": 0.0016951879999851371
}
}
},
"UnityEnvironment.step": {
"total": 0.03017283999997744,
"count": 1,
"is_parallel": true,
"self": 0.0003129720000742964,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0001871929999879285,
"count": 1,
"is_parallel": true,
"self": 0.0001871929999879285
},
"communicator.exchange": {
"total": 0.028892907999988893,
"count": 1,
"is_parallel": true,
"self": 0.028892907999988893
},
"steps_from_proto": {
"total": 0.0007797669999263235,
"count": 1,
"is_parallel": true,
"self": 0.00027865399988513673,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005011130000411868,
"count": 2,
"is_parallel": true,
"self": 0.0005011130000411868
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1137.4201450899047,
"count": 233169,
"is_parallel": true,
"self": 33.33044799002437,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 72.81516488590933,
"count": 233169,
"is_parallel": true,
"self": 72.81516488590933
},
"communicator.exchange": {
"total": 941.3944284119888,
"count": 233169,
"is_parallel": true,
"self": 941.3944284119888
},
"steps_from_proto": {
"total": 89.88010380198227,
"count": 233169,
"is_parallel": true,
"self": 36.8631246808859,
"children": {
"_process_rank_one_or_two_observation": {
"total": 53.01697912109637,
"count": 466338,
"is_parallel": true,
"self": 53.01697912109637
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 459.66400964586387,
"count": 233170,
"self": 5.380786114773059,
"children": {
"process_trajectory": {
"total": 144.77483586309324,
"count": 233170,
"self": 143.63429989509268,
"children": {
"RLTrainer._checkpoint": {
"total": 1.1405359680005631,
"count": 10,
"self": 1.1405359680005631
}
}
},
"_update_policy": {
"total": 309.50838766799757,
"count": 97,
"self": 257.36364231099435,
"children": {
"TorchPPOOptimizer.update": {
"total": 52.14474535700322,
"count": 2910,
"self": 52.14474535700322
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.679999154992402e-07,
"count": 1,
"self": 8.679999154992402e-07
},
"TrainerController._save_models": {
"total": 0.12861751300033575,
"count": 1,
"self": 0.0028382640002746484,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1257792490000611,
"count": 1,
"self": 0.1257792490000611
}
}
}
}
}
}
}