ppo-Huggy/run_logs/timers.json
{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4063256978988647,
"min": 1.4063256978988647,
"max": 1.429353952407837,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 70226.28125,
"min": 68755.8515625,
"max": 77525.78125,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 77.9304897314376,
"min": 77.38161993769471,
"max": 413.95081967213116,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49330.0,
"min": 48924.0,
"max": 50502.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999956.0,
"min": 49874.0,
"max": 1999956.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999956.0,
"min": 49874.0,
"max": 1999956.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.4412081241607666,
"min": 0.08094997704029083,
"max": 2.484259843826294,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1545.28466796875,
"min": 9.794947624206543,
"max": 1551.4227294921875,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.8361855710475554,
"min": 1.761910484103132,
"max": 3.985196542681048,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2428.3054664731026,
"min": 213.19116857647896,
"max": 2428.3054664731026,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.8361855710475554,
"min": 1.761910484103132,
"max": 3.985196542681048,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2428.3054664731026,
"min": 213.19116857647896,
"max": 2428.3054664731026,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.01710995665198425,
"min": 0.013427512719257115,
"max": 0.019814353756373748,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.05132986995595275,
"min": 0.02685502543851423,
"max": 0.058560732098218674,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05519077856507567,
"min": 0.022408453437189262,
"max": 0.06613522072633107,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.165572335695227,
"min": 0.044945931807160375,
"max": 0.19840566217899322,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.911198696300001e-06,
"min": 3.911198696300001e-06,
"max": 0.00029537242654252495,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.1733596088900002e-05,
"min": 1.1733596088900002e-05,
"max": 0.0008442087185970998,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10130370000000001,
"min": 0.10130370000000001,
"max": 0.19845747500000002,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30391110000000005,
"min": 0.20773960000000002,
"max": 0.5814029,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 7.505463000000001e-05,
"min": 7.505463000000001e-05,
"max": 0.004923028002500001,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00022516389000000003,
"min": 0.00022516389000000003,
"max": 0.014072004709999999,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1670754049",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1670756340"
},
"total": 2291.4338983959997,
"count": 1,
"self": 0.39118446299971765,
"children": {
"run_training.setup": {
"total": 0.17597635600003514,
"count": 1,
"self": 0.17597635600003514
},
"TrainerController.start_learning": {
"total": 2290.866737577,
"count": 1,
"self": 3.903320093884304,
"children": {
"TrainerController._reset_env": {
"total": 12.119333309000012,
"count": 1,
"self": 12.119333309000012
},
"TrainerController.advance": {
"total": 2274.7336465171156,
"count": 232551,
"self": 4.182535599293715,
"children": {
"env_step": {
"total": 1803.44666582992,
"count": 232551,
"self": 1512.7843473479177,
"children": {
"SubprocessEnvManager._take_step": {
"total": 287.94116870997823,
"count": 232551,
"self": 15.050814695026077,
"children": {
"TorchPolicy.evaluate": {
"total": 272.89035401495215,
"count": 222894,
"self": 68.64053414593042,
"children": {
"TorchPolicy.sample_actions": {
"total": 204.24981986902174,
"count": 222894,
"self": 204.24981986902174
}
}
}
}
},
"workers": {
"total": 2.721149772024148,
"count": 232551,
"self": 0.0,
"children": {
"worker_root": {
"total": 2282.7574596501745,
"count": 232551,
"is_parallel": true,
"self": 1043.3601787621437,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002759791000016776,
"count": 1,
"is_parallel": true,
"self": 0.0005401520000418714,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0022196389999749044,
"count": 2,
"is_parallel": true,
"self": 0.0022196389999749044
}
}
},
"UnityEnvironment.step": {
"total": 0.029209984999965855,
"count": 1,
"is_parallel": true,
"self": 0.0002931269999066899,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0002142680000360997,
"count": 1,
"is_parallel": true,
"self": 0.0002142680000360997
},
"communicator.exchange": {
"total": 0.027978253000014774,
"count": 1,
"is_parallel": true,
"self": 0.027978253000014774
},
"steps_from_proto": {
"total": 0.0007243370000082905,
"count": 1,
"is_parallel": true,
"self": 0.00026286299998901086,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00046147400001927963,
"count": 2,
"is_parallel": true,
"self": 0.00046147400001927963
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1239.3972808880308,
"count": 232550,
"is_parallel": true,
"self": 35.32164912215103,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 80.45199155089995,
"count": 232550,
"is_parallel": true,
"self": 80.45199155089995
},
"communicator.exchange": {
"total": 1026.6569943330046,
"count": 232550,
"is_parallel": true,
"self": 1026.6569943330046
},
"steps_from_proto": {
"total": 96.96664588197518,
"count": 232550,
"is_parallel": true,
"self": 42.29716926895594,
"children": {
"_process_rank_one_or_two_observation": {
"total": 54.66947661301924,
"count": 465100,
"is_parallel": true,
"self": 54.66947661301924
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 467.10444508790187,
"count": 232551,
"self": 6.037892031889214,
"children": {
"process_trajectory": {
"total": 151.6904660050119,
"count": 232551,
"self": 151.21929098901217,
"children": {
"RLTrainer._checkpoint": {
"total": 0.47117501599973366,
"count": 4,
"self": 0.47117501599973366
}
}
},
"_update_policy": {
"total": 309.37608705100075,
"count": 97,
"self": 256.4958889260061,
"children": {
"TorchPPOOptimizer.update": {
"total": 52.88019812499465,
"count": 2910,
"self": 52.88019812499465
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.2740001693600789e-06,
"count": 1,
"self": 1.2740001693600789e-06
},
"TrainerController._save_models": {
"total": 0.11043638299997838,
"count": 1,
"self": 0.001989643999877444,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10844673900010093,
"count": 1,
"self": 0.10844673900010093
}
}
}
}
}
}
}
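
The file has two parts: a `gauges` map of training statistics (policy entropy, episode length, rewards, losses, and the learning-rate/epsilon/beta schedules, each with the last written value, the min/max observed, and a write count) and, after the `metadata` block, a hierarchical timing profile of the run. A minimal sketch for inspecting the gauges, assuming the file is saved locally at `run_logs/timers.json`:

```python
import json

# Path is an assumption: point it at this file wherever it lives locally.
with open("run_logs/timers.json") as f:
    timers = json.load(f)

# Each gauge stores the last value written plus the min/max observed
# and the number of summary writes ("count", 40 for this run).
for name, gauge in timers["gauges"].items():
    print(f"{name}: value={gauge['value']:.4g}  "
          f"min={gauge['min']:.4g}  max={gauge['max']:.4g}  "
          f"(count={gauge['count']})")
```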
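The `metadata` block records wall-clock start and end times as Unix timestamps; combined with `Huggy.Step.sum`, this gives the run's training throughput. A small sketch of that arithmetic (same assumed path):

```python
import json

with open("run_logs/timers.json") as f:  # path is an assumption
    timers = json.load(f)

meta = timers["metadata"]
wall = int(meta["end_time_seconds"]) - int(meta["start_time_seconds"])  # 2291 s
steps = timers["gauges"]["Huggy.Step.sum"]["value"]                     # 1999956.0
print(f"{steps / wall:,.0f} environment steps/s over {wall} s")         # ~873 steps/s
```

The computed wall time (2291 s) matches the profiler's top-level `"total"` of 2291.43 s, so the timer tree covers essentially the whole run.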
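Everything after `metadata` is a nested timer tree: each node carries `total` seconds, a call `count`, `self` time (time not attributed to children), and a `children` dict. A minimal sketch, assuming the same local path, that walks the tree and prints each node's share of its parent:

```python
import json

def walk(node, name="root", depth=0, parent_total=None):
    """Print each timer node's total seconds, call count, and share of
    its parent's total, following the nested "children" dicts."""
    total = node.get("total", 0.0)
    share = ""
    if parent_total:  # guard against zero-total parents in the log
        share = f"  ({100.0 * total / parent_total:.1f}% of parent)"
    par = "  [parallel]" if node.get("is_parallel") else ""
    print(f"{'  ' * depth}{name}: {total:.2f}s  "
          f"count={node.get('count', 0)}{par}{share}")
    for child_name, child in node.get("children", {}).items():
        walk(child, child_name, depth + 1, total)

with open("run_logs/timers.json") as f:  # path is an assumption
    walk(json.load(f))
```

Note that nodes flagged `is_parallel` accumulate time across subprocess workers, so their totals are not bounded by the parent's wall-clock total (here `workers` reports 2.72 s of self-side bookkeeping while its `worker_root` child accumulates 2282.76 s across processes). Reading the tree, `env_step` accounts for roughly 1803 s of the 2291 s run, with `communicator.exchange` (the round trip to the Unity executable) dominating inside the workers.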