{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 1.081977128982544,
"min": 1.081977128982544,
"max": 2.855358839035034,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 10330.7177734375,
"min": 10330.7177734375,
"max": 29241.73046875,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 13.133866310119629,
"min": 0.33693596720695496,
"max": 13.133866310119629,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2561.10400390625,
"min": 65.3655776977539,
"max": 2660.630615234375,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06554590235828073,
"min": 0.06141091940582341,
"max": 0.07310232228209154,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2621836094331229,
"min": 0.24564367762329364,
"max": 0.36551161141045774,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.21595617657636895,
"min": 0.12116885256717283,
"max": 0.2965412577285486,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.8638247063054758,
"min": 0.4846754102686913,
"max": 1.4827062886427431,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 1.0776097306000009e-05,
"min": 1.0776097306000009e-05,
"max": 0.000389176002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 4.3104389224000035e-05,
"min": 4.3104389224000035e-05,
"max": 0.0018468800382800002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.197294,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.9617200000000001,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.004864970599999999,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828000000003,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.727272727272727,
"min": 3.7045454545454546,
"max": 26.12727272727273,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1132.0,
"min": 163.0,
"max": 1437.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.727272727272727,
"min": 3.7045454545454546,
"max": 26.12727272727273,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1132.0,
"min": 163.0,
"max": 1437.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1679307174",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1679307691"
},
"total": 517.1323006350001,
"count": 1,
"self": 0.43211895700017067,
"children": {
"run_training.setup": {
"total": 0.10514019000004282,
"count": 1,
"self": 0.10514019000004282
},
"TrainerController.start_learning": {
"total": 516.5950414879999,
"count": 1,
"self": 0.5760805789894903,
"children": {
"TrainerController._reset_env": {
"total": 10.358029982999938,
"count": 1,
"self": 10.358029982999938
},
"TrainerController.advance": {
"total": 505.5194288790103,
"count": 18201,
"self": 0.2994573289956861,
"children": {
"env_step": {
"total": 505.21997155001463,
"count": 18201,
"self": 371.39660851302233,
"children": {
"SubprocessEnvManager._take_step": {
"total": 133.54428674297833,
"count": 18201,
"self": 2.1186576619716107,
"children": {
"TorchPolicy.evaluate": {
"total": 131.42562908100672,
"count": 18201,
"self": 131.42562908100672
}
}
},
"workers": {
"total": 0.2790762940139757,
"count": 18201,
"self": 0.0,
"children": {
"worker_root": {
"total": 514.9266196729905,
"count": 18201,
"is_parallel": true,
"self": 263.6994217679951,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005627452999988236,
"count": 1,
"is_parallel": true,
"self": 0.004037738999954854,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001589714000033382,
"count": 10,
"is_parallel": true,
"self": 0.001589714000033382
}
}
},
"UnityEnvironment.step": {
"total": 0.037078744000154984,
"count": 1,
"is_parallel": true,
"self": 0.00060438300010901,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00045074700005898194,
"count": 1,
"is_parallel": true,
"self": 0.00045074700005898194
},
"communicator.exchange": {
"total": 0.03397898399998667,
"count": 1,
"is_parallel": true,
"self": 0.03397898399998667
},
"steps_from_proto": {
"total": 0.002044630000000325,
"count": 1,
"is_parallel": true,
"self": 0.00041361200010214816,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016310179998981766,
"count": 10,
"is_parallel": true,
"self": 0.0016310179998981766
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 251.22719790499536,
"count": 18200,
"is_parallel": true,
"self": 10.038808819992482,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.580484879999176,
"count": 18200,
"is_parallel": true,
"self": 5.580484879999176
},
"communicator.exchange": {
"total": 201.86685442198632,
"count": 18200,
"is_parallel": true,
"self": 201.86685442198632
},
"steps_from_proto": {
"total": 33.74104978301739,
"count": 18200,
"is_parallel": true,
"self": 6.941218813071828,
"children": {
"_process_rank_one_or_two_observation": {
"total": 26.79983096994556,
"count": 182000,
"is_parallel": true,
"self": 26.79983096994556
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00010194200012847432,
"count": 1,
"self": 0.00010194200012847432,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 501.5445581001038,
"count": 421299,
"is_parallel": true,
"self": 10.876799148078362,
"children": {
"process_trajectory": {
"total": 262.66875924202714,
"count": 421299,
"is_parallel": true,
"self": 261.54166943502696,
"children": {
"RLTrainer._checkpoint": {
"total": 1.127089807000175,
"count": 4,
"is_parallel": true,
"self": 1.127089807000175
}
}
},
"_update_policy": {
"total": 227.99899970999832,
"count": 90,
"is_parallel": true,
"self": 89.02975011899343,
"children": {
"TorchPPOOptimizer.update": {
"total": 138.96924959100488,
"count": 6116,
"is_parallel": true,
"self": 138.96924959100488
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.14140010500000244,
"count": 1,
"self": 0.0008762459999616112,
"children": {
"RLTrainer._checkpoint": {
"total": 0.14052385900004083,
"count": 1,
"self": 0.14052385900004083
}
}
}
}
}
}
}