{
"author": "Sri Kanthavel",
"description": "first training of SnowballTarget",
"run_number": 2175934,
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.9865233898162842,
"min": 0.9769531488418579,
"max": 2.8804733753204346,
"count": 40
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 5002.66015625,
"min": 4423.7197265625,
"max": 14955.41796875,
"count": 40
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 4976.0,
"max": 199984.0,
"count": 40
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 4976.0,
"max": 199984.0,
"count": 40
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.945466041564941,
"min": 0.12383613735437393,
"max": 12.945466041564941,
"count": 40
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 1268.6556396484375,
"min": 12.012104988098145,
"max": 1368.72998046875,
"count": 40
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 40
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 4378.0,
"min": 4378.0,
"max": 6567.0,
"count": 40
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06119467089450279,
"min": 0.05589344148357472,
"max": 0.07844825871128991,
"count": 40
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.12238934178900558,
"min": 0.11178688296714943,
"max": 0.21994142656388016,
"count": 40
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.18320888412349365,
"min": 0.09737183046801126,
"max": 0.28807831151423113,
"count": 40
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.3664177682469873,
"min": 0.19474366093602252,
"max": 0.8642349345426934,
"count": 40
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 4.782098406000003e-06,
"min": 4.782098406000003e-06,
"max": 0.000295182001606,
"count": 40
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 9.564196812000006e-06,
"min": 9.564196812000006e-06,
"max": 0.0008211960262680001,
"count": 40
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.101594,
"min": 0.101594,
"max": 0.19839400000000001,
"count": 40
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.203188,
"min": 0.203188,
"max": 0.573732,
"count": 40
},
"SnowballTarget.Policy.Beta.mean": {
"value": 8.954060000000007e-05,
"min": 8.954060000000007e-05,
"max": 0.0049198606,
"count": 40
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.00017908120000000014,
"min": 0.00017908120000000014,
"max": 0.013689226799999999,
"count": 40
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 26.136363636363637,
"min": 3.0454545454545454,
"max": 26.136363636363637,
"count": 40
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 575.0,
"min": 67.0,
"max": 847.0,
"count": 40
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 26.136363636363637,
"min": 3.0454545454545454,
"max": 26.136363636363637,
"count": 40
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 575.0,
"min": 67.0,
"max": 847.0,
"count": 40
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1691829015",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1691829557"
},
"total": 542.1500468789999,
"count": 1,
"self": 0.4866441809998605,
"children": {
"run_training.setup": {
"total": 0.06905229799997414,
"count": 1,
"self": 0.06905229799997414
},
"TrainerController.start_learning": {
"total": 541.5943504,
"count": 1,
"self": 0.7070959789896278,
"children": {
"TrainerController._reset_env": {
"total": 4.827877537000063,
"count": 1,
"self": 4.827877537000063
},
"TrainerController.advance": {
"total": 535.9039368490103,
"count": 18202,
"self": 0.34381268700224155,
"children": {
"env_step": {
"total": 535.560124162008,
"count": 18202,
"self": 391.3465156599858,
"children": {
"SubprocessEnvManager._take_step": {
"total": 143.86702192601456,
"count": 18202,
"self": 2.041648620021874,
"children": {
"TorchPolicy.evaluate": {
"total": 141.82537330599268,
"count": 18202,
"self": 141.82537330599268
}
}
},
"workers": {
"total": 0.34658657600766674,
"count": 18202,
"self": 0.0,
"children": {
"worker_root": {
"total": 539.6182793180087,
"count": 18202,
"is_parallel": true,
"self": 251.21901014401578,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.00708886799998254,
"count": 1,
"is_parallel": true,
"self": 0.004395590999934029,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0026932770000485107,
"count": 10,
"is_parallel": true,
"self": 0.0026932770000485107
}
}
},
"UnityEnvironment.step": {
"total": 0.05557012199994915,
"count": 1,
"is_parallel": true,
"self": 0.0007094409999126583,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00033372799998687697,
"count": 1,
"is_parallel": true,
"self": 0.00033372799998687697
},
"communicator.exchange": {
"total": 0.05193056800010254,
"count": 1,
"is_parallel": true,
"self": 0.05193056800010254
},
"steps_from_proto": {
"total": 0.0025963849999470767,
"count": 1,
"is_parallel": true,
"self": 0.0004492609999715569,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00214712399997552,
"count": 10,
"is_parallel": true,
"self": 0.00214712399997552
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 288.39926917399293,
"count": 18201,
"is_parallel": true,
"self": 11.798202670982278,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 6.209508186006019,
"count": 18201,
"is_parallel": true,
"self": 6.209508186006019
},
"communicator.exchange": {
"total": 228.06536430600488,
"count": 18201,
"is_parallel": true,
"self": 228.06536430600488
},
"steps_from_proto": {
"total": 42.32619401099976,
"count": 18201,
"is_parallel": true,
"self": 7.969513511016885,
"children": {
"_process_rank_one_or_two_observation": {
"total": 34.35668049998287,
"count": 182010,
"is_parallel": true,
"self": 34.35668049998287
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.0001987709999866638,
"count": 1,
"self": 0.0001987709999866638,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 531.5191160680095,
"count": 508705,
"is_parallel": true,
"self": 12.16270045904389,
"children": {
"process_trajectory": {
"total": 296.84100704196635,
"count": 508705,
"is_parallel": true,
"self": 295.6081481159663,
"children": {
"RLTrainer._checkpoint": {
"total": 1.2328589260000626,
"count": 4,
"is_parallel": true,
"self": 1.2328589260000626
}
}
},
"_update_policy": {
"total": 222.5154085669992,
"count": 90,
"is_parallel": true,
"self": 87.93632522800249,
"children": {
"TorchPPOOptimizer.update": {
"total": 134.57908333899672,
"count": 4587,
"is_parallel": true,
"self": 134.57908333899672
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.15524126400009663,
"count": 1,
"self": 0.0009420390001650958,
"children": {
"RLTrainer._checkpoint": {
"total": 0.15429922499993154,
"count": 1,
"self": 0.15429922499993154
}
}
}
}
}
}
}