{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.7951968312263489,
"min": 0.7475613951683044,
"max": 2.8866794109344482,
"count": 50
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 7907.4375,
"min": 7448.94287109375,
"max": 30007.033203125,
"count": 50
},
"SnowballTarget.Step.mean": {
"value": 499976.0,
"min": 9952.0,
"max": 499976.0,
"count": 50
},
"SnowballTarget.Step.sum": {
"value": 499976.0,
"min": 9952.0,
"max": 499976.0,
"count": 50
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 13.512768745422363,
"min": 0.3120355010032654,
"max": 13.512768745422363,
"count": 50
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 3432.2431640625,
"min": 76.13666534423828,
"max": 3432.2431640625,
"count": 50
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 50
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 10945.0,
"min": 8756.0,
"max": 10945.0,
"count": 50
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.07381167833547235,
"min": 0.06262896556325037,
"max": 0.07381167833547235,
"count": 50
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.36905839167736176,
"min": 0.25639183441800173,
"max": 0.36905839167736176,
"count": 50
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.17600076622238342,
"min": 0.10130362485549138,
"max": 0.2597944387326053,
"count": 50
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.8800038311119172,
"min": 0.4052144994219655,
"max": 1.2958696519627289,
"count": 50
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 1.017698982400001e-06,
"min": 1.017698982400001e-06,
"max": 9.891760108239999e-05,
"count": 50
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 5.088494912000005e-06,
"min": 5.088494912000005e-06,
"max": 0.0004846880153120001,
"count": 50
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10101760000000001,
"min": 0.10101760000000001,
"max": 0.19891759999999997,
"count": 50
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.5050880000000001,
"min": 0.41199040000000003,
"max": 0.984688,
"count": 50
},
"SnowballTarget.Policy.Beta.mean": {
"value": 6.0778240000000044e-05,
"min": 6.0778240000000044e-05,
"max": 0.00494598824,
"count": 50
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0003038912000000002,
"min": 0.0003038912000000002,
"max": 0.0242359312,
"count": 50
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 26.509090909090908,
"min": 3.090909090909091,
"max": 27.0,
"count": 50
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1458.0,
"min": 136.0,
"max": 1462.0,
"count": 50
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 26.509090909090908,
"min": 3.090909090909091,
"max": 27.0,
"count": 50
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1458.0,
"min": 136.0,
"max": 1462.0,
"count": 50
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 50
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 50
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1679244830",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1679246061"
},
"total": 1231.3003933300001,
"count": 1,
"self": 0.43543385100019805,
"children": {
"run_training.setup": {
"total": 0.10682355299991286,
"count": 1,
"self": 0.10682355299991286
},
"TrainerController.start_learning": {
"total": 1230.758135926,
"count": 1,
"self": 1.5343944079565972,
"children": {
"TrainerController._reset_env": {
"total": 9.167053892000013,
"count": 1,
"self": 9.167053892000013
},
"TrainerController.advance": {
"total": 1219.9215198710433,
"count": 45501,
"self": 0.7880336770233498,
"children": {
"env_step": {
"total": 1219.13348619402,
"count": 45501,
"self": 886.7797826329861,
"children": {
"SubprocessEnvManager._take_step": {
"total": 331.5910375610281,
"count": 45501,
"self": 5.306665165036748,
"children": {
"TorchPolicy.evaluate": {
"total": 326.28437239599134,
"count": 45501,
"self": 326.28437239599134
}
}
},
"workers": {
"total": 0.7626660000057655,
"count": 45501,
"self": 0.0,
"children": {
"worker_root": {
"total": 1226.8539187150138,
"count": 45501,
"is_parallel": true,
"self": 583.0167319990453,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.006139709999843035,
"count": 1,
"is_parallel": true,
"self": 0.004478027999994083,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016616819998489518,
"count": 10,
"is_parallel": true,
"self": 0.0016616819998489518
}
}
},
"UnityEnvironment.step": {
"total": 0.06958735200009869,
"count": 1,
"is_parallel": true,
"self": 0.0005623690001357318,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00038745799997741415,
"count": 1,
"is_parallel": true,
"self": 0.00038745799997741415
},
"communicator.exchange": {
"total": 0.06682451300002867,
"count": 1,
"is_parallel": true,
"self": 0.06682451300002867
},
"steps_from_proto": {
"total": 0.0018130119999568706,
"count": 1,
"is_parallel": true,
"self": 0.00034694599935392034,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014660660006029502,
"count": 10,
"is_parallel": true,
"self": 0.0014660660006029502
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 643.8371867159685,
"count": 45500,
"is_parallel": true,
"self": 25.35274791805159,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 13.484451376008792,
"count": 45500,
"is_parallel": true,
"self": 13.484451376008792
},
"communicator.exchange": {
"total": 522.2543289079554,
"count": 45500,
"is_parallel": true,
"self": 522.2543289079554
},
"steps_from_proto": {
"total": 82.74565851395278,
"count": 45500,
"is_parallel": true,
"self": 17.058668387067883,
"children": {
"_process_rank_one_or_two_observation": {
"total": 65.6869901268849,
"count": 455000,
"is_parallel": true,
"self": 65.6869901268849
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.0002855590000763186,
"count": 1,
"self": 0.0002855590000763186,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 1209.9450187337563,
"count": 1187162,
"is_parallel": true,
"self": 29.26373717560432,
"children": {
"process_trajectory": {
"total": 733.3719909351544,
"count": 1187162,
"is_parallel": true,
"self": 730.642971229154,
"children": {
"RLTrainer._checkpoint": {
"total": 2.729019706000372,
"count": 10,
"is_parallel": true,
"self": 2.729019706000372
}
}
},
"_update_policy": {
"total": 447.30929062299765,
"count": 227,
"is_parallel": true,
"self": 172.83708420798894,
"children": {
"TorchPPOOptimizer.update": {
"total": 274.4722064150087,
"count": 11571,
"is_parallel": true,
"self": 274.4722064150087
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.13488219600003504,
"count": 1,
"self": 0.0009476750001340406,
"children": {
"RLTrainer._checkpoint": {
"total": 0.133934520999901,
"count": 1,
"self": 0.133934520999901
}
}
}
}
}
}
}
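
For reference: the JSON above is the ML-Agents gauge/timer report written at the end of the SnowballTarget run. Each entry under "gauges" records the most recent value plus the min, max, and count of a logged metric, and the nested "children" entries are hierarchical wall-clock timings (total seconds, call count, self time) per call site. The snippet below is a minimal sketch, not part of the run itself, for inspecting the gauges; it assumes the report has been saved locally as "timers.json" (a hypothetical path, adjust to wherever the file actually lives).

import json

# Load the report written at the end of training (path is an assumption).
with open("timers.json") as f:
    report = json.load(f)

# Print each gauge's latest value alongside its observed range and sample count.
for name, gauge in report["gauges"].items():
    print(
        f"{name}: value={gauge['value']:.4f} "
        f"(min={gauge['min']:.4f}, max={gauge['max']:.4f}, count={gauge['count']})"
    )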