{
    "name": "root",
    "gauges": {
        "SnowballTarget.Policy.Entropy.mean": {
            "value": 1.1129968166351318,
            "min": 1.1129968166351318,
            "max": 2.8705177307128906,
            "count": 20
        },
        "SnowballTarget.Policy.Entropy.sum": {
            "value": 10626.8935546875,
            "min": 10626.8935546875,
            "max": 29396.97265625,
            "count": 20
        },
        "SnowballTarget.Step.mean": {
            "value": 199984.0,
            "min": 9952.0,
            "max": 199984.0,
            "count": 20
        },
        "SnowballTarget.Step.sum": {
            "value": 199984.0,
            "min": 9952.0,
            "max": 199984.0,
            "count": 20
        },
        "SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
            "value": 13.070113182067871,
            "min": 0.46627941727638245,
            "max": 13.070113182067871,
            "count": 20
        },
        "SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
            "value": 2548.672119140625,
            "min": 90.45820617675781,
            "max": 2660.63671875,
            "count": 20
        },
        "SnowballTarget.Environment.EpisodeLength.mean": {
            "value": 199.0,
            "min": 199.0,
            "max": 199.0,
            "count": 20
        },
        "SnowballTarget.Environment.EpisodeLength.sum": {
            "value": 8756.0,
            "min": 8756.0,
            "max": 10945.0,
            "count": 20
        },
        "SnowballTarget.Losses.PolicyLoss.mean": {
            "value": 0.07387076106733373,
            "min": 0.0657961568786014,
            "max": 0.07536137542908775,
            "count": 20
        },
        "SnowballTarget.Losses.PolicyLoss.sum": {
            "value": 0.2954830442693349,
            "min": 0.26419008571018837,
            "max": 0.3768068771454387,
            "count": 20
        },
        "SnowballTarget.Losses.ValueLoss.mean": {
            "value": 0.21529757961923,
            "min": 0.12150445178621357,
            "max": 0.3033773511648178,
            "count": 20
        },
        "SnowballTarget.Losses.ValueLoss.sum": {
            "value": 0.86119031847692,
            "min": 0.48601780714485426,
            "max": 1.516886755824089,
            "count": 20
        },
        "SnowballTarget.Policy.LearningRate.mean": {
            "value": 8.082097306000005e-06,
            "min": 8.082097306000005e-06,
            "max": 0.000291882002706,
            "count": 20
        },
        "SnowballTarget.Policy.LearningRate.sum": {
            "value": 3.232838922400002e-05,
            "min": 3.232838922400002e-05,
            "max": 0.00138516003828,
            "count": 20
        },
        "SnowballTarget.Policy.Epsilon.mean": {
            "value": 0.10269400000000001,
            "min": 0.10269400000000001,
            "max": 0.19729400000000002,
            "count": 20
        },
        "SnowballTarget.Policy.Epsilon.sum": {
            "value": 0.41077600000000003,
            "min": 0.41077600000000003,
            "max": 0.96172,
            "count": 20
        },
        "SnowballTarget.Policy.Beta.mean": {
            "value": 0.0001444306000000001,
            "min": 0.0001444306000000001,
            "max": 0.0048649706,
            "count": 20
        },
        "SnowballTarget.Policy.Beta.sum": {
            "value": 0.0005777224000000004,
            "min": 0.0005777224000000004,
            "max": 0.023089828,
            "count": 20
        },
        "SnowballTarget.Environment.CumulativeReward.mean": {
            "value": 25.727272727272727,
            "min": 3.409090909090909,
            "max": 25.954545454545453,
            "count": 20
        },
        "SnowballTarget.Environment.CumulativeReward.sum": {
            "value": 1132.0,
            "min": 150.0,
            "max": 1391.0,
            "count": 20
        },
        "SnowballTarget.Policy.ExtrinsicReward.mean": {
            "value": 25.727272727272727,
            "min": 3.409090909090909,
            "max": 25.954545454545453,
            "count": 20
        },
        "SnowballTarget.Policy.ExtrinsicReward.sum": {
            "value": 1132.0,
            "min": 150.0,
            "max": 1391.0,
            "count": 20
        },
        "SnowballTarget.IsTraining.mean": {
            "value": 1.0,
            "min": 1.0,
            "max": 1.0,
            "count": 20
        },
        "SnowballTarget.IsTraining.sum": {
            "value": 1.0,
            "min": 1.0,
            "max": 1.0,
            "count": 20
        }
    },
    "metadata": {
        "timer_format_version": "0.1.0",
        "start_time_seconds": "1697525811",
        "python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
        "command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
        "mlagents_version": "1.1.0.dev0",
        "mlagents_envs_version": "1.1.0.dev0",
        "communication_protocol_version": "1.5.0",
        "pytorch_version": "2.0.1+cu118",
        "numpy_version": "1.23.5",
        "end_time_seconds": "1697526322"
    },
    "total": 510.479828685,
    "count": 1,
    "self": 0.4252104380000219,
    "children": {
        "run_training.setup": {
            "total": 0.04594440699997904,
            "count": 1,
            "self": 0.04594440699997904
        },
        "TrainerController.start_learning": {
            "total": 510.00867384,
            "count": 1,
            "self": 0.6943325339987041,
            "children": {
                "TrainerController._reset_env": {
                    "total": 7.86807565700002,
                    "count": 1,
                    "self": 7.86807565700002
                },
                "TrainerController.advance": {
                    "total": 501.35960681400127,
                    "count": 18199,
                    "self": 0.3468597699924203,
                    "children": {
                        "env_step": {
                            "total": 501.01274704400885,
                            "count": 18199,
                            "self": 344.016084121028,
                            "children": {
                                "SubprocessEnvManager._take_step": {
                                    "total": 156.63195785298853,
                                    "count": 18199,
                                    "self": 1.6046787289851068,
                                    "children": {
                                        "TorchPolicy.evaluate": {
                                            "total": 155.02727912400343,
                                            "count": 18199,
                                            "self": 155.02727912400343
                                        }
                                    }
                                },
                                "workers": {
                                    "total": 0.36470506999233976,
                                    "count": 18199,
                                    "self": 0.0,
                                    "children": {
                                        "worker_root": {
                                            "total": 508.54123882799973,
                                            "count": 18199,
                                            "is_parallel": true,
                                            "self": 244.48410853999957,
                                            "children": {
                                                "run_training.setup": {
                                                    "total": 0.0,
                                                    "count": 0,
                                                    "is_parallel": true,
                                                    "self": 0.0,
                                                    "children": {
                                                        "steps_from_proto": {
                                                            "total": 0.007447159999969699,
                                                            "count": 1,
                                                            "is_parallel": true,
                                                            "self": 0.005072614000027897,
                                                            "children": {
                                                                "_process_rank_one_or_two_observation": {
                                                                    "total": 0.0023745459999418017,
                                                                    "count": 10,
                                                                    "is_parallel": true,
                                                                    "self": 0.0023745459999418017
                                                                }
                                                            }
                                                        },
                                                        "UnityEnvironment.step": {
                                                            "total": 0.04122965300001624,
                                                            "count": 1,
                                                            "is_parallel": true,
                                                            "self": 0.0007963630000062949,
                                                            "children": {
                                                                "UnityEnvironment._generate_step_input": {
                                                                    "total": 0.0005186839999851145,
                                                                    "count": 1,
                                                                    "is_parallel": true,
                                                                    "self": 0.0005186839999851145
                                                                },
                                                                "communicator.exchange": {
                                                                    "total": 0.03754048700000112,
                                                                    "count": 1,
                                                                    "is_parallel": true,
                                                                    "self": 0.03754048700000112
                                                                },
                                                                "steps_from_proto": {
                                                                    "total": 0.002374119000023711,
                                                                    "count": 1,
                                                                    "is_parallel": true,
                                                                    "self": 0.0004633349999494385,
                                                                    "children": {
                                                                        "_process_rank_one_or_two_observation": {
                                                                            "total": 0.0019107840000742726,
                                                                            "count": 10,
                                                                            "is_parallel": true,
                                                                            "self": 0.0019107840000742726
                                                                        }
                                                                    }
                                                                }
                                                            }
                                                        }
                                                    }
                                                },
                                                "UnityEnvironment.step": {
                                                    "total": 264.05713028800017,
                                                    "count": 18198,
                                                    "is_parallel": true,
                                                    "self": 11.522113546007574,
                                                    "children": {
                                                        "UnityEnvironment._generate_step_input": {
                                                            "total": 5.671431906002738,
                                                            "count": 18198,
                                                            "is_parallel": true,
                                                            "self": 5.671431906002738
                                                        },
                                                        "communicator.exchange": {
                                                            "total": 210.32674423398987,
                                                            "count": 18198,
                                                            "is_parallel": true,
                                                            "self": 210.32674423398987
                                                        },
                                                        "steps_from_proto": {
                                                            "total": 36.536840601999984,
                                                            "count": 18198,
                                                            "is_parallel": true,
                                                            "self": 7.351540793003721,
                                                            "children": {
                                                                "_process_rank_one_or_two_observation": {
                                                                    "total": 29.185299808996263,
                                                                    "count": 181980,
                                                                    "is_parallel": true,
                                                                    "self": 29.185299808996263
                                                                }
                                                            }
                                                        }
                                                    }
                                                }
                                            }
                                        }
                                    }
                                }
                            }
                        }
                    }
                },
                "trainer_threads": {
                    "total": 0.00013037400003668154,
                    "count": 1,
                    "self": 0.00013037400003668154,
                    "children": {
                        "thread_root": {
                            "total": 0.0,
                            "count": 0,
                            "is_parallel": true,
                            "self": 0.0,
                            "children": {
                                "trainer_advance": {
                                    "total": 497.39841414299514,
                                    "count": 513111,
                                    "is_parallel": true,
                                    "self": 10.944225814990546,
                                    "children": {
                                        "process_trajectory": {
                                            "total": 282.5375671270046,
                                            "count": 513111,
                                            "is_parallel": true,
                                            "self": 281.73514284200456,
                                            "children": {
                                                "RLTrainer._checkpoint": {
                                                    "total": 0.8024242850000292,
                                                    "count": 4,
                                                    "is_parallel": true,
                                                    "self": 0.8024242850000292
                                                }
                                            }
                                        },
                                        "_update_policy": {
                                            "total": 203.916621201,
                                            "count": 90,
                                            "is_parallel": true,
                                            "self": 61.74518864399897,
                                            "children": {
                                                "TorchPPOOptimizer.update": {
                                                    "total": 142.17143255700103,
                                                    "count": 4587,
                                                    "is_parallel": true,
                                                    "self": 142.17143255700103
                                                }
                                            }
                                        }
                                    }
                                }
                            }
                        }
                    }
                },
                "TrainerController._save_models": {
                    "total": 0.08652846099994349,
                    "count": 1,
                    "self": 0.0008082479999984571,
                    "children": {
                        "RLTrainer._checkpoint": {
                            "total": 0.08572021299994503,
                            "count": 1,
                            "self": 0.08572021299994503
                        }
                    }
                }
            }
        }
    }
}
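
A minimal sketch of how the log above can be read back, assuming it is saved as run_logs/timers.json (the path is an assumption based on where ML-Agents typically writes this file; the variable names are illustrative). Each entry under "gauges" holds the most recent summarized value plus its min, max, and the number of summaries ("count") over the run, while the timer tree records wall-clock seconds per call site.

    import json

    # Assumed location of this file; adjust to wherever the run log actually lives.
    with open("run_logs/timers.json") as f:
        run_log = json.load(f)

    gauges = run_log["gauges"]

    # Pull two of the gauges shown above: episode reward and policy entropy.
    reward = gauges["SnowballTarget.Environment.CumulativeReward.mean"]
    entropy = gauges["SnowballTarget.Policy.Entropy.mean"]

    print(f"Final mean reward: {reward['value']:.2f} "
          f"(min {reward['min']:.2f}, max {reward['max']:.2f}, {reward['count']} summaries)")
    print(f"Final policy entropy: {entropy['value']:.3f}")

    # The top-level "total" is the wall-clock duration of the whole run in seconds.
    print(f"Total run time: {run_log['total']:.1f} s")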