First Push (35f89f6)
{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 1.1141241788864136,
"min": 1.1141241788864136,
"max": 2.862166404724121,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 10662.16796875,
"min": 10662.16796875,
"max": 29374.4140625,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.2818021774292,
"min": 0.3608442544937134,
"max": 12.2818021774292,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2394.951416015625,
"min": 70.0037841796875,
"max": 2467.65185546875,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.07177798978356056,
"min": 0.06493369726459586,
"max": 0.07533502643876572,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.28711195913424226,
"min": 0.26601369200019975,
"max": 0.3670086737404869,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.19383228073517483,
"min": 0.134061215210286,
"max": 0.26429082307161067,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.7753291229406993,
"min": 0.536244860841144,
"max": 1.2766428543072121,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 24.636363636363637,
"min": 3.590909090909091,
"max": 24.636363636363637,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1084.0,
"min": 158.0,
"max": 1330.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 24.636363636363637,
"min": 3.590909090909091,
"max": 24.636363636363637,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1084.0,
"min": 158.0,
"max": 1330.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1678238558",
"python_version": "3.8.10 (default, Jun 22 2022, 20:18:18) \n[GCC 9.4.0]",
"command_line_arguments": "/home/hanbk/torch_venv/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics --force",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.2+cu111",
"numpy_version": "1.20.1",
"end_time_seconds": "1678238835"
},
"total": 276.6171004669741,
"count": 1,
"self": 0.2759075569920242,
"children": {
"run_training.setup": {
"total": 0.1846781269996427,
"count": 1,
"self": 0.1846781269996427
},
"TrainerController.start_learning": {
"total": 276.1565147829824,
"count": 1,
"self": 0.4244566017296165,
"children": {
"TrainerController._reset_env": {
"total": 3.4115457470179535,
"count": 1,
"self": 3.4115457470179535
},
"TrainerController.advance": {
"total": 272.2176226982265,
"count": 18204,
"self": 0.201736168586649,
"children": {
"env_step": {
"total": 272.01588652963983,
"count": 18204,
"self": 173.95540132117458,
"children": {
"SubprocessEnvManager._take_step": {
"total": 97.86607464996632,
"count": 18204,
"self": 1.0105102944071405,
"children": {
"TorchPolicy.evaluate": {
"total": 96.85556435555918,
"count": 18204,
"self": 17.581195001432206,
"children": {
"TorchPolicy.sample_actions": {
"total": 79.27436935412697,
"count": 18204,
"self": 79.27436935412697
}
}
}
}
},
"workers": {
"total": 0.19441055849893019,
"count": 18204,
"self": 0.0,
"children": {
"worker_root": {
"total": 275.7558832904906,
"count": 18204,
"is_parallel": true,
"self": 156.58989730780013,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0016646040021441877,
"count": 1,
"is_parallel": true,
"self": 0.0005133821396157146,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0011512218625284731,
"count": 10,
"is_parallel": true,
"self": 0.0011512218625284731
}
}
},
"UnityEnvironment.step": {
"total": 0.02015393995679915,
"count": 1,
"is_parallel": true,
"self": 0.0002854789490811527,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0002344120293855667,
"count": 1,
"is_parallel": true,
"self": 0.0002344120293855667
},
"communicator.exchange": {
"total": 0.018572524015326053,
"count": 1,
"is_parallel": true,
"self": 0.018572524015326053
},
"steps_from_proto": {
"total": 0.0010615249630063772,
"count": 1,
"is_parallel": true,
"self": 0.0002779939095489681,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0007835310534574091,
"count": 10,
"is_parallel": true,
"self": 0.0007835310534574091
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 119.16598598269047,
"count": 18203,
"is_parallel": true,
"self": 4.92347563279327,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 2.876449733390473,
"count": 18203,
"is_parallel": true,
"self": 2.876449733390473
},
"communicator.exchange": {
"total": 93.98589113960043,
"count": 18203,
"is_parallel": true,
"self": 93.98589113960043
},
"steps_from_proto": {
"total": 17.380169476906303,
"count": 18203,
"is_parallel": true,
"self": 4.329606411862187,
"children": {
"_process_rank_one_or_two_observation": {
"total": 13.050563065044116,
"count": 182030,
"is_parallel": true,
"self": 13.050563065044116
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00010000698966905475,
"count": 1,
"self": 0.00010000698966905475,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 271.648424321902,
"count": 229836,
"is_parallel": true,
"self": 2.4784274172852747,
"children": {
"process_trajectory": {
"total": 145.95277851022547,
"count": 229836,
"is_parallel": true,
"self": 145.31259441218572,
"children": {
"RLTrainer._checkpoint": {
"total": 0.6401840980397537,
"count": 4,
"is_parallel": true,
"self": 0.6401840980397537
}
}
},
"_update_policy": {
"total": 123.21721839439124,
"count": 90,
"is_parallel": true,
"self": 35.11600922199432,
"children": {
"TorchPPOOptimizer.update": {
"total": 88.10120917239692,
"count": 4587,
"is_parallel": true,
"self": 88.10120917239692
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.10278972901869565,
"count": 1,
"self": 0.0006025730399414897,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10218715597875416,
"count": 1,
"self": 0.10218715597875416
}
}
}
}
}
}
}
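
The "gauges" block above holds the per-metric training summary for the run: each entry keeps the last reported value together with the min, max, and count of reports. Below is a minimal sketch of reading it back for a quick overview, assuming the JSON above has been saved locally as "timers.json" (the filename and path are an assumption, not taken from the commit):

```python
import json

# Load the metric/timer dump shown above (local path is an assumption).
with open("timers.json") as f:
    timers = json.load(f)

# Each gauge stores the last reported value plus min/max and the number
# of times it was reported during training.
for name, gauge in timers["gauges"].items():
    print(f'{name}: last={gauge["value"]:g} min={gauge["min"]:g} '
          f'max={gauge["max"]:g} (n={gauge["count"]})')
```

For this run, for example, SnowballTarget.Environment.CumulativeReward.mean ends at about 24.6 after a minimum of about 3.6 earlier in training.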
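
The remaining top-level fields ("total", "count", "self", "children") form a hierarchical wall-clock profile of the same run, with each node reporting its total seconds, call count, self time, and nested children. A small traversal sketch follows, again assuming the file is saved locally as "timers.json"; both the path and the helper function name are hypothetical:

```python
import json

def print_timers(node, name="root", depth=0, parent_total=None):
    # Print this timer node with its total seconds, call count, and its
    # share of the parent's total when a meaningful parent total exists.
    total = node.get("total", 0.0)
    share = f" ({100 * total / parent_total:.1f}% of parent)" if parent_total else ""
    print(f'{"  " * depth}{name}: {total:.2f}s over {node.get("count", 0)} call(s){share}')
    for child_name, child in node.get("children", {}).items():
        # Pass None instead of 0.0 so zero-total placeholder nodes do not
        # cause a division by zero when printing their children.
        print_timers(child, child_name, depth + 1, total or None)

with open("timers.json") as f:
    timers = json.load(f)

# The timer fields sit at the top level of the file, next to "gauges"
# and "metadata", so the root object can be passed in directly.
print_timers(timers)
```

Run against the dump above, this would show, for instance, that of the roughly 276 seconds recorded under root, about 272 were spent inside TrainerController.advance.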