{
    "name": "root",
    "gauges": {
        "SnowballTarget.Policy.Entropy.mean": {
            "value": 1.1322945356369019,
            "min": 1.1322945356369019,
            "max": 2.8558342456817627,
            "count": 20
        },
        "SnowballTarget.Policy.Entropy.sum": {
            "value": 10848.513671875,
            "min": 10848.513671875,
            "max": 29278.01171875,
            "count": 20
        },
        "SnowballTarget.Step.mean": {
            "value": 199984.0,
            "min": 9952.0,
            "max": 199984.0,
            "count": 20
        },
        "SnowballTarget.Step.sum": {
            "value": 199984.0,
            "min": 9952.0,
            "max": 199984.0,
            "count": 20
        },
        "SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
            "value": 11.974501609802246,
            "min": 0.4376535713672638,
            "max": 11.974501609802246,
            "count": 20
        },
        "SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
            "value": 2335.02783203125,
            "min": 84.90479278564453,
            "max": 2398.787841796875,
            "count": 20
        },
        "SnowballTarget.Environment.EpisodeLength.mean": {
            "value": 199.0,
            "min": 199.0,
            "max": 199.0,
            "count": 20
        },
        "SnowballTarget.Environment.EpisodeLength.sum": {
            "value": 8756.0,
            "min": 8756.0,
            "max": 10945.0,
            "count": 20
        },
        "SnowballTarget.Losses.PolicyLoss.mean": {
            "value": 0.06677463126800598,
            "min": 0.060284501939100205,
            "max": 0.07628564718784307,
            "count": 20
        },
        "SnowballTarget.Losses.PolicyLoss.sum": {
            "value": 0.2670985250720239,
            "min": 0.24113800775640082,
            "max": 0.3767743916724416,
            "count": 20
        },
        "SnowballTarget.Losses.ValueLoss.mean": {
            "value": 0.211860485009703,
            "min": 0.10933664314535575,
            "max": 0.2908311834522322,
            "count": 20
        },
        "SnowballTarget.Losses.ValueLoss.sum": {
            "value": 0.847441940038812,
            "min": 0.437346572581423,
            "max": 1.454155917261161,
            "count": 20
        },
        "SnowballTarget.Policy.LearningRate.mean": {
            "value": 8.082097306000005e-06,
            "min": 8.082097306000005e-06,
            "max": 0.000291882002706,
            "count": 20
        },
        "SnowballTarget.Policy.LearningRate.sum": {
            "value": 3.232838922400002e-05,
            "min": 3.232838922400002e-05,
            "max": 0.00138516003828,
            "count": 20
        },
        "SnowballTarget.Policy.Epsilon.mean": {
            "value": 0.10269400000000001,
            "min": 0.10269400000000001,
            "max": 0.19729400000000002,
            "count": 20
        },
        "SnowballTarget.Policy.Epsilon.sum": {
            "value": 0.41077600000000003,
            "min": 0.41077600000000003,
            "max": 0.96172,
            "count": 20
        },
        "SnowballTarget.Policy.Beta.mean": {
            "value": 0.0001444306000000001,
            "min": 0.0001444306000000001,
            "max": 0.0048649706,
            "count": 20
        },
        "SnowballTarget.Policy.Beta.sum": {
            "value": 0.0005777224000000004,
            "min": 0.0005777224000000004,
            "max": 0.023089828,
            "count": 20
        },
        "SnowballTarget.Environment.CumulativeReward.mean": {
            "value": 23.954545454545453,
            "min": 3.272727272727273,
            "max": 23.954545454545453,
            "count": 20
        },
        "SnowballTarget.Environment.CumulativeReward.sum": {
            "value": 1054.0,
            "min": 144.0,
            "max": 1291.0,
            "count": 20
        },
        "SnowballTarget.Policy.ExtrinsicReward.mean": {
            "value": 23.954545454545453,
            "min": 3.272727272727273,
            "max": 23.954545454545453,
            "count": 20
        },
        "SnowballTarget.Policy.ExtrinsicReward.sum": {
            "value": 1054.0,
            "min": 144.0,
            "max": 1291.0,
            "count": 20
        },
        "SnowballTarget.IsTraining.mean": {
            "value": 1.0,
            "min": 1.0,
            "max": 1.0,
            "count": 20
        },
        "SnowballTarget.IsTraining.sum": {
            "value": 1.0,
            "min": 1.0,
            "max": 1.0,
            "count": 20
        }
    },
    "metadata": {
        "timer_format_version": "0.1.0",
        "start_time_seconds": "1679674826",
        "python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
        "command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
        "mlagents_version": "0.31.0.dev0",
        "mlagents_envs_version": "0.31.0.dev0",
        "communication_protocol_version": "1.5.0",
        "pytorch_version": "1.11.0+cu102",
        "numpy_version": "1.21.2",
        "end_time_seconds": "1679675284"
    },
    "total": 458.05725936299996,
    "count": 1,
    "self": 0.38120635099983247,
    "children": {
        "run_training.setup": {
            "total": 0.10415301900002305,
            "count": 1,
            "self": 0.10415301900002305
        },
        "TrainerController.start_learning": {
            "total": 457.5718999930001,
            "count": 1,
            "self": 0.531236218999652,
            "children": {
                "TrainerController._reset_env": {
                    "total": 9.789148635999936,
                    "count": 1,
                    "self": 9.789148635999936
                },
                "TrainerController.advance": {
                    "total": 447.1038243300004,
                    "count": 18205,
                    "self": 0.27114537599527466,
                    "children": {
                        "env_step": {
                            "total": 446.83267895400513,
                            "count": 18205,
                            "self": 323.02302907798696,
                            "children": {
                                "SubprocessEnvManager._take_step": {
                                    "total": 123.54844523900294,
                                    "count": 18205,
                                    "self": 2.042846366021763,
                                    "children": {
                                        "TorchPolicy.evaluate": {
                                            "total": 121.50559887298118,
                                            "count": 18205,
                                            "self": 121.50559887298118
                                        }
                                    }
                                },
                                "workers": {
                                    "total": 0.26120463701522567,
                                    "count": 18205,
                                    "self": 0.0,
                                    "children": {
                                        "worker_root": {
                                            "total": 456.05685000499545,
                                            "count": 18205,
                                            "is_parallel": true,
                                            "self": 216.83978242399007,
                                            "children": {
                                                "run_training.setup": {
                                                    "total": 0.0,
                                                    "count": 0,
                                                    "is_parallel": true,
                                                    "self": 0.0,
                                                    "children": {
                                                        "steps_from_proto": {
                                                            "total": 0.004980694999972002,
                                                            "count": 1,
                                                            "is_parallel": true,
                                                            "self": 0.003484442000285526,
                                                            "children": {
                                                                "_process_rank_one_or_two_observation": {
                                                                    "total": 0.001496252999686476,
                                                                    "count": 10,
                                                                    "is_parallel": true,
                                                                    "self": 0.001496252999686476
                                                                }
                                                            }
                                                        },
                                                        "UnityEnvironment.step": {
                                                            "total": 0.043805836999922576,
                                                            "count": 1,
                                                            "is_parallel": true,
                                                            "self": 0.000545662000035918,
                                                            "children": {
                                                                "UnityEnvironment._generate_step_input": {
                                                                    "total": 0.0002816219999886016,
                                                                    "count": 1,
                                                                    "is_parallel": true,
                                                                    "self": 0.0002816219999886016
                                                                },
                                                                "communicator.exchange": {
                                                                    "total": 0.04110232799996538,
                                                                    "count": 1,
                                                                    "is_parallel": true,
                                                                    "self": 0.04110232799996538
                                                                },
                                                                "steps_from_proto": {
                                                                    "total": 0.0018762249999326741,
                                                                    "count": 1,
                                                                    "is_parallel": true,
                                                                    "self": 0.00038694599959399056,
                                                                    "children": {
                                                                        "_process_rank_one_or_two_observation": {
                                                                            "total": 0.0014892790003386835,
                                                                            "count": 10,
                                                                            "is_parallel": true,
                                                                            "self": 0.0014892790003386835
                                                                        }
                                                                    }
                                                                }
                                                            }
                                                        }
                                                    }
                                                },
                                                "UnityEnvironment.step": {
                                                    "total": 239.21706758100538,
                                                    "count": 18204,
                                                    "is_parallel": true,
                                                    "self": 9.495424125030581,
                                                    "children": {
                                                        "UnityEnvironment._generate_step_input": {
                                                            "total": 5.198842343005708,
                                                            "count": 18204,
                                                            "is_parallel": true,
                                                            "self": 5.198842343005708
                                                        },
                                                        "communicator.exchange": {
                                                            "total": 193.7265980409777,
                                                            "count": 18204,
                                                            "is_parallel": true,
                                                            "self": 193.7265980409777
                                                        },
                                                        "steps_from_proto": {
                                                            "total": 30.796203071991386,
                                                            "count": 18204,
                                                            "is_parallel": true,
                                                            "self": 6.041371538006274,
                                                            "children": {
                                                                "_process_rank_one_or_two_observation": {
                                                                    "total": 24.75483153398511,
                                                                    "count": 182040,
                                                                    "is_parallel": true,
                                                                    "self": 24.75483153398511
                                                                }
                                                            }
                                                        }
                                                    }
                                                }
                                            }
                                        }
                                    }
                                }
                            }
                        }
                    }
                },
                "trainer_threads": {
                    "total": 0.0001591729999290692,
                    "count": 1,
                    "self": 0.0001591729999290692,
                    "children": {
                        "thread_root": {
                            "total": 0.0,
                            "count": 0,
                            "is_parallel": true,
                            "self": 0.0,
                            "children": {
                                "trainer_advance": {
                                    "total": 443.8729001199465,
                                    "count": 407662,
                                    "is_parallel": true,
                                    "self": 9.504065423941142,
                                    "children": {
                                        "process_trajectory": {
                                            "total": 246.4948647380046,
                                            "count": 407662,
                                            "is_parallel": true,
                                            "self": 245.60826828500456,
                                            "children": {
                                                "RLTrainer._checkpoint": {
                                                    "total": 0.8865964530000383,
                                                    "count": 4,
                                                    "is_parallel": true,
                                                    "self": 0.8865964530000383
                                                }
                                            }
                                        },
                                        "_update_policy": {
                                            "total": 187.87396995800077,
                                            "count": 90,
                                            "is_parallel": true,
                                            "self": 70.26837018799858,
                                            "children": {
                                                "TorchPPOOptimizer.update": {
                                                    "total": 117.60559977000219,
                                                    "count": 4587,
                                                    "is_parallel": true,
                                                    "self": 117.60559977000219
                                                }
                                            }
                                        }
                                    }
                                }
                            }
                        }
                    }
                },
                "TrainerController._save_models": {
                    "total": 0.14753163500017763,
                    "count": 1,
                    "self": 0.0009338040001694026,
                    "children": {
                        "RLTrainer._checkpoint": {
                            "total": 0.14659783100000823,
                            "count": 1,
                            "self": 0.14659783100000823
                        }
                    }
                }
            }
        }
    }
}