{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 1.0181849002838135,
"min": 1.0181849002838135,
"max": 2.869067430496216,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 9744.029296875,
"min": 9744.029296875,
"max": 29382.119140625,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.160003662109375,
"min": 0.4875902533531189,
"max": 12.160003662109375,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2371.20068359375,
"min": 94.5925064086914,
"max": 2441.051513671875,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.070347283562239,
"min": 0.06265756629637668,
"max": 0.07721666063869134,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.281389134248956,
"min": 0.2694512789203859,
"max": 0.3533268941557217,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.2039208634957379,
"min": 0.11877324702172522,
"max": 0.2721210952629061,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.8156834539829516,
"min": 0.47509298808690087,
"max": 1.3181961167092417,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 24.477272727272727,
"min": 3.2954545454545454,
"max": 24.477272727272727,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1077.0,
"min": 145.0,
"max": 1304.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 24.477272727272727,
"min": 3.2954545454545454,
"max": 24.477272727272727,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1077.0,
"min": 145.0,
"max": 1304.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1681623375",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1681623833"
},
"total": 458.0265646050001,
"count": 1,
"self": 0.38300427200022114,
"children": {
"run_training.setup": {
"total": 0.1155008779999207,
"count": 1,
"self": 0.1155008779999207
},
"TrainerController.start_learning": {
"total": 457.52805945499995,
"count": 1,
"self": 0.5559640469959959,
"children": {
"TrainerController._reset_env": {
"total": 4.65709937400004,
"count": 1,
"self": 4.65709937400004
},
"TrainerController.advance": {
"total": 452.1859914050036,
"count": 18203,
"self": 0.28491895599574946,
"children": {
"env_step": {
"total": 451.90107244900787,
"count": 18203,
"self": 333.2329898520088,
"children": {
"SubprocessEnvManager._take_step": {
"total": 118.39570752500117,
"count": 18203,
"self": 1.6677599020064235,
"children": {
"TorchPolicy.evaluate": {
"total": 116.72794762299475,
"count": 18203,
"self": 116.72794762299475
}
}
},
"workers": {
"total": 0.27237507199788524,
"count": 18203,
"self": 0.0,
"children": {
"worker_root": {
"total": 456.10189300999343,
"count": 18203,
"is_parallel": true,
"self": 209.75671258999387,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.00477863600008277,
"count": 1,
"is_parallel": true,
"self": 0.003312061000315225,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014665749997675448,
"count": 10,
"is_parallel": true,
"self": 0.0014665749997675448
}
}
},
"UnityEnvironment.step": {
"total": 0.05394877300000189,
"count": 1,
"is_parallel": true,
"self": 0.0005987110000660323,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004127369999196162,
"count": 1,
"is_parallel": true,
"self": 0.0004127369999196162
},
"communicator.exchange": {
"total": 0.05108210499997767,
"count": 1,
"is_parallel": true,
"self": 0.05108210499997767
},
"steps_from_proto": {
"total": 0.001855220000038571,
"count": 1,
"is_parallel": true,
"self": 0.0003992979999338786,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014559220001046924,
"count": 10,
"is_parallel": true,
"self": 0.0014559220001046924
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 246.34518041999956,
"count": 18202,
"is_parallel": true,
"self": 9.798130508982581,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.284078581016047,
"count": 18202,
"is_parallel": true,
"self": 5.284078581016047
},
"communicator.exchange": {
"total": 199.72000690900302,
"count": 18202,
"is_parallel": true,
"self": 199.72000690900302
},
"steps_from_proto": {
"total": 31.54296442099792,
"count": 18202,
"is_parallel": true,
"self": 6.255794195963858,
"children": {
"_process_rank_one_or_two_observation": {
"total": 25.28717022503406,
"count": 182020,
"is_parallel": true,
"self": 25.28717022503406
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00010963400018226821,
"count": 1,
"self": 0.00010963400018226821,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 448.89309714301953,
"count": 402905,
"is_parallel": true,
"self": 9.538800294992257,
"children": {
"process_trajectory": {
"total": 246.72253722402831,
"count": 402905,
"is_parallel": true,
"self": 245.9571254650284,
"children": {
"RLTrainer._checkpoint": {
"total": 0.7654117589999032,
"count": 4,
"is_parallel": true,
"self": 0.7654117589999032
}
}
},
"_update_policy": {
"total": 192.63175962399896,
"count": 90,
"is_parallel": true,
"self": 73.27153149699961,
"children": {
"TorchPPOOptimizer.update": {
"total": 119.36022812699935,
"count": 4587,
"is_parallel": true,
"self": 119.36022812699935
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.1288949950001097,
"count": 1,
"self": 0.000863404000256196,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1280315909998535,
"count": 1,
"self": 0.1280315909998535
}
}
}
}
}
}
}