{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.9244474172592163,
"min": 0.9244474172592163,
"max": 2.8679277896881104,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 8857.130859375,
"min": 8857.130859375,
"max": 29370.44921875,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.676702499389648,
"min": 0.34530267119407654,
"max": 12.676702499389648,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2471.95703125,
"min": 66.98871612548828,
"max": 2538.583984375,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06038221494796459,
"min": 0.06038221494796459,
"max": 0.07363092525766286,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.24152885979185837,
"min": 0.24152885979185837,
"max": 0.35982033491360627,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.20360718389936522,
"min": 0.097072697511655,
"max": 0.27841350862792896,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.8144287355974609,
"min": 0.38829079004662,
"max": 1.3920675431396448,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.113636363636363,
"min": 2.659090909090909,
"max": 25.113636363636363,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1105.0,
"min": 117.0,
"max": 1376.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.113636363636363,
"min": 2.659090909090909,
"max": 25.113636363636363,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1105.0,
"min": 117.0,
"max": 1376.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1679130380",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --force --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1679130843"
},
"total": 462.76883739100003,
"count": 1,
"self": 0.7909385610001891,
"children": {
"run_training.setup": {
"total": 0.10389317699991807,
"count": 1,
"self": 0.10389317699991807
},
"TrainerController.start_learning": {
"total": 461.8740056529999,
"count": 1,
"self": 0.5474804739974388,
"children": {
"TrainerController._reset_env": {
"total": 5.629057169000021,
"count": 1,
"self": 5.629057169000021
},
"TrainerController.advance": {
"total": 455.46545854600254,
"count": 18215,
"self": 0.2735065470074005,
"children": {
"env_step": {
"total": 455.19195199899514,
"count": 18215,
"self": 325.1551221729949,
"children": {
"SubprocessEnvManager._take_step": {
"total": 129.769632424007,
"count": 18215,
"self": 2.607107731027554,
"children": {
"TorchPolicy.evaluate": {
"total": 127.16252469297945,
"count": 18215,
"self": 127.16252469297945
}
}
},
"workers": {
"total": 0.2671974019932577,
"count": 18215,
"self": 0.0,
"children": {
"worker_root": {
"total": 460.0644846110022,
"count": 18215,
"is_parallel": true,
"self": 218.56354549100945,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0020350980000785057,
"count": 1,
"is_parallel": true,
"self": 0.0006161099998962527,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001418988000182253,
"count": 10,
"is_parallel": true,
"self": 0.001418988000182253
}
}
},
"UnityEnvironment.step": {
"total": 0.03498414499995306,
"count": 1,
"is_parallel": true,
"self": 0.00047987899995405314,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0003759369999443152,
"count": 1,
"is_parallel": true,
"self": 0.0003759369999443152
},
"communicator.exchange": {
"total": 0.032486010000070564,
"count": 1,
"is_parallel": true,
"self": 0.032486010000070564
},
"steps_from_proto": {
"total": 0.001642318999984127,
"count": 1,
"is_parallel": true,
"self": 0.0003993259999788279,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012429930000052991,
"count": 10,
"is_parallel": true,
"self": 0.0012429930000052991
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 241.50093911999272,
"count": 18214,
"is_parallel": true,
"self": 9.471685862007803,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.322491853003271,
"count": 18214,
"is_parallel": true,
"self": 5.322491853003271
},
"communicator.exchange": {
"total": 194.6089596759915,
"count": 18214,
"is_parallel": true,
"self": 194.6089596759915
},
"steps_from_proto": {
"total": 32.097801728990135,
"count": 18214,
"is_parallel": true,
"self": 6.354615287061961,
"children": {
"_process_rank_one_or_two_observation": {
"total": 25.743186441928174,
"count": 182140,
"is_parallel": true,
"self": 25.743186441928174
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00011425299999245908,
"count": 1,
"self": 0.00011425299999245908,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 452.00412508902195,
"count": 414764,
"is_parallel": true,
"self": 10.098914213028252,
"children": {
"process_trajectory": {
"total": 251.41748861999372,
"count": 414764,
"is_parallel": true,
"self": 249.89419376099363,
"children": {
"RLTrainer._checkpoint": {
"total": 1.5232948590000888,
"count": 4,
"is_parallel": true,
"self": 1.5232948590000888
}
}
},
"_update_policy": {
"total": 190.48772225599998,
"count": 90,
"is_parallel": true,
"self": 70.87039021900216,
"children": {
"TorchPPOOptimizer.update": {
"total": 119.61733203699782,
"count": 4587,
"is_parallel": true,
"self": 119.61733203699782
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.23189521099993726,
"count": 1,
"self": 0.0011407019999296608,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2307545090000076,
"count": 1,
"self": 0.2307545090000076
}
}
}
}
}
}
}