{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 1.0808089971542358,
"min": 1.0808089971542358,
"max": 2.8161678314208984,
"count": 10
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 53928.04296875,
"min": 53928.04296875,
"max": 142250.265625,
"count": 10
},
"SnowballTarget.Step.mean": {
"value": 499976.0,
"min": 49936.0,
"max": 499976.0,
"count": 10
},
"SnowballTarget.Step.sum": {
"value": 499976.0,
"min": 49936.0,
"max": 499976.0,
"count": 10
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.410496711730957,
"min": 0.4977417588233948,
"max": 12.410496711730957,
"count": 10
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 12447.728515625,
"min": 493.75982666015625,
"max": 12447.728515625,
"count": 10
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 10
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 50347.0,
"min": 48158.0,
"max": 50347.0,
"count": 10
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.450592885375492,
"min": 4.421487603305785,
"max": 25.450592885375492,
"count": 10
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 6439.0,
"min": 1070.0,
"max": 6439.0,
"count": 10
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.450592885375492,
"min": 4.421487603305785,
"max": 25.450592885375492,
"count": 10
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 6439.0,
"min": 1070.0,
"max": 6439.0,
"count": 10
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.034703764460784214,
"min": 0.03092670888682545,
"max": 0.03544381486771026,
"count": 10
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.13881505784313686,
"min": 0.1237068355473018,
"max": 0.17675379196891933,
"count": 10
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.2332314179441522,
"min": 0.14866675968681062,
"max": 0.29265326806713665,
"count": 10
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.9329256717766088,
"min": 0.5946670387472425,
"max": 1.4632663403356831,
"count": 10
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 1.4576495141200002e-05,
"min": 1.4576495141200002e-05,
"max": 0.0002841864052712,
"count": 10
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 5.830598056480001e-05,
"min": 5.830598056480001e-05,
"max": 0.0012797184734272,
"count": 10
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.1048588,
"min": 0.1048588,
"max": 0.19472880000000004,
"count": 10
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.4194352,
"min": 0.4194352,
"max": 0.9265728,
"count": 10
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.00025245412,
"min": 0.00025245412,
"max": 0.00473696712,
"count": 10
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.00100981648,
"min": 0.00100981648,
"max": 0.021335982719999997,
"count": 10
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 10
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 10
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1679676294",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget2 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1679677205"
},
"total": 911.534228516,
"count": 1,
"self": 0.43875345300057234,
"children": {
"run_training.setup": {
"total": 0.11191284099959375,
"count": 1,
"self": 0.11191284099959375
},
"TrainerController.start_learning": {
"total": 910.9835622219998,
"count": 1,
"self": 0.8819165700397207,
"children": {
"TrainerController._reset_env": {
"total": 6.801290793999669,
"count": 1,
"self": 6.801290793999669
},
"TrainerController.advance": {
"total": 903.1545492259606,
"count": 45464,
"self": 0.9160812480081404,
"children": {
"env_step": {
"total": 693.2853571709693,
"count": 45464,
"self": 565.7987313059598,
"children": {
"SubprocessEnvManager._take_step": {
"total": 126.94533476202605,
"count": 45464,
"self": 6.381621722028285,
"children": {
"TorchPolicy.evaluate": {
"total": 120.56371303999777,
"count": 45464,
"self": 120.56371303999777
}
}
},
"workers": {
"total": 0.5412911029834504,
"count": 45464,
"self": 0.0,
"children": {
"worker_root": {
"total": 907.8060095219475,
"count": 45464,
"is_parallel": true,
"self": 408.4044104619711,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0018736420001914666,
"count": 1,
"is_parallel": true,
"self": 0.0005965200002719939,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012771219999194727,
"count": 10,
"is_parallel": true,
"self": 0.0012771219999194727
}
}
},
"UnityEnvironment.step": {
"total": 0.03432349900003828,
"count": 1,
"is_parallel": true,
"self": 0.0006319849994724791,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004138530002819607,
"count": 1,
"is_parallel": true,
"self": 0.0004138530002819607
},
"communicator.exchange": {
"total": 0.031135356000049796,
"count": 1,
"is_parallel": true,
"self": 0.031135356000049796
},
"steps_from_proto": {
"total": 0.002142305000234046,
"count": 1,
"is_parallel": true,
"self": 0.00045947700027682004,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001682827999957226,
"count": 10,
"is_parallel": true,
"self": 0.001682827999957226
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 499.40159905997643,
"count": 45463,
"is_parallel": true,
"self": 23.05559838698082,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 13.039904888000365,
"count": 45463,
"is_parallel": true,
"self": 13.039904888000365
},
"communicator.exchange": {
"total": 388.7178397979683,
"count": 45463,
"is_parallel": true,
"self": 388.7178397979683
},
"steps_from_proto": {
"total": 74.58825598702697,
"count": 45463,
"is_parallel": true,
"self": 14.302805120039466,
"children": {
"_process_rank_one_or_two_observation": {
"total": 60.2854508669875,
"count": 454630,
"is_parallel": true,
"self": 60.2854508669875
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 208.9531108069832,
"count": 45464,
"self": 1.0602916199259198,
"children": {
"process_trajectory": {
"total": 59.964856461056115,
"count": 45464,
"self": 59.82488411605618,
"children": {
"RLTrainer._checkpoint": {
"total": 0.13997234499993283,
"count": 1,
"self": 0.13997234499993283
}
}
},
"_update_policy": {
"total": 147.92796272600117,
"count": 47,
"self": 113.61383091700554,
"children": {
"TorchPPOOptimizer.update": {
"total": 34.314131808995626,
"count": 2856,
"self": 34.314131808995626
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.4479996934824158e-06,
"count": 1,
"self": 1.4479996934824158e-06
},
"TrainerController._save_models": {
"total": 0.14580418400009876,
"count": 1,
"self": 0.0006638550003117416,
"children": {
"RLTrainer._checkpoint": {
"total": 0.14514032899978702,
"count": 1,
"self": 0.14514032899978702
}
}
}
}
}
}
}