First Push
bc8b400
{
    "name": "root",
    "gauges": {
        "SnowballTarget.Policy.Entropy.mean": {
            "value": 0.8919032216072083,
            "min": 0.8919032216072083,
            "max": 2.872331380844116,
            "count": 20
        },
        "SnowballTarget.Policy.Entropy.sum": {
            "value": 8525.703125,
            "min": 8525.703125,
            "max": 29415.546875,
            "count": 20
        },
        "SnowballTarget.Step.mean": {
            "value": 199984.0,
            "min": 9952.0,
            "max": 199984.0,
            "count": 20
        },
        "SnowballTarget.Step.sum": {
            "value": 199984.0,
            "min": 9952.0,
            "max": 199984.0,
            "count": 20
        },
        "SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
            "value": 12.914673805236816,
            "min": 0.33305370807647705,
            "max": 12.914673805236816,
            "count": 20
        },
        "SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
            "value": 2518.361328125,
            "min": 64.61241912841797,
            "max": 2608.82763671875,
            "count": 20
        },
        "SnowballTarget.Environment.EpisodeLength.mean": {
            "value": 199.0,
            "min": 199.0,
            "max": 199.0,
            "count": 20
        },
        "SnowballTarget.Environment.EpisodeLength.sum": {
            "value": 8756.0,
            "min": 8756.0,
            "max": 10945.0,
            "count": 20
        },
        "SnowballTarget.Losses.PolicyLoss.mean": {
            "value": 0.07152451092021211,
            "min": 0.0632748559934884,
            "max": 0.0737611127662211,
            "count": 20
        },
        "SnowballTarget.Losses.PolicyLoss.sum": {
            "value": 0.28609804368084846,
            "min": 0.2530994239739536,
            "max": 0.36121205297440256,
            "count": 20
        },
        "SnowballTarget.Losses.ValueLoss.mean": {
            "value": 0.1970834562591478,
            "min": 0.11382793169354508,
            "max": 0.30025076533065126,
            "count": 20
        },
        "SnowballTarget.Losses.ValueLoss.sum": {
            "value": 0.7883338250365912,
            "min": 0.4553117267741803,
            "max": 1.5012538266532562,
            "count": 20
        },
        "SnowballTarget.Policy.LearningRate.mean": {
            "value": 8.082097306000005e-06,
            "min": 8.082097306000005e-06,
            "max": 0.000291882002706,
            "count": 20
        },
        "SnowballTarget.Policy.LearningRate.sum": {
            "value": 3.232838922400002e-05,
            "min": 3.232838922400002e-05,
            "max": 0.00138516003828,
            "count": 20
        },
        "SnowballTarget.Policy.Epsilon.mean": {
            "value": 0.10269400000000001,
            "min": 0.10269400000000001,
            "max": 0.19729400000000002,
            "count": 20
        },
        "SnowballTarget.Policy.Epsilon.sum": {
            "value": 0.41077600000000003,
            "min": 0.41077600000000003,
            "max": 0.96172,
            "count": 20
        },
        "SnowballTarget.Policy.Beta.mean": {
            "value": 0.0001444306000000001,
            "min": 0.0001444306000000001,
            "max": 0.0048649706,
            "count": 20
        },
        "SnowballTarget.Policy.Beta.sum": {
            "value": 0.0005777224000000004,
            "min": 0.0005777224000000004,
            "max": 0.023089828,
            "count": 20
        },
        "SnowballTarget.Environment.CumulativeReward.mean": {
            "value": 25.386363636363637,
            "min": 3.3636363636363638,
            "max": 25.477272727272727,
            "count": 20
        },
        "SnowballTarget.Environment.CumulativeReward.sum": {
            "value": 1117.0,
            "min": 148.0,
            "max": 1376.0,
            "count": 20
        },
        "SnowballTarget.Policy.ExtrinsicReward.mean": {
            "value": 25.386363636363637,
            "min": 3.3636363636363638,
            "max": 25.477272727272727,
            "count": 20
        },
        "SnowballTarget.Policy.ExtrinsicReward.sum": {
            "value": 1117.0,
            "min": 148.0,
            "max": 1376.0,
            "count": 20
        },
        "SnowballTarget.IsTraining.mean": {
            "value": 1.0,
            "min": 1.0,
            "max": 1.0,
            "count": 20
        },
        "SnowballTarget.IsTraining.sum": {
            "value": 1.0,
            "min": 1.0,
            "max": 1.0,
            "count": 20
        }
    },
    "metadata": {
        "timer_format_version": "0.1.0",
        "start_time_seconds": "1691851095",
        "python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
        "command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
        "mlagents_version": "0.31.0.dev0",
        "mlagents_envs_version": "0.31.0.dev0",
        "communication_protocol_version": "1.5.0",
        "pytorch_version": "1.11.0+cu102",
        "numpy_version": "1.21.2",
        "end_time_seconds": "1691851595"
    },
    "total": 499.45697965499994,
    "count": 1,
    "self": 0.43344772800003284,
    "children": {
        "run_training.setup": {
            "total": 0.04479314799993972,
            "count": 1,
            "self": 0.04479314799993972
        },
        "TrainerController.start_learning": {
            "total": 498.97873877899997,
            "count": 1,
            "self": 0.5736089910049031,
            "children": {
                "TrainerController._reset_env": {
                    "total": 4.130036519999976,
                    "count": 1,
                    "self": 4.130036519999976
                },
                "TrainerController.advance": {
                    "total": 494.1281509109949,
                    "count": 18201,
                    "self": 0.3010479290155672,
                    "children": {
                        "env_step": {
                            "total": 493.82710298197935,
                            "count": 18201,
                            "self": 360.8315152299846,
                            "children": {
                                "SubprocessEnvManager._take_step": {
                                    "total": 132.69056835999368,
                                    "count": 18201,
                                    "self": 1.8557931500096174,
                                    "children": {
                                        "TorchPolicy.evaluate": {
                                            "total": 130.83477520998406,
                                            "count": 18201,
                                            "self": 130.83477520998406
                                        }
                                    }
                                },
                                "workers": {
                                    "total": 0.30501939200109973,
                                    "count": 18201,
                                    "self": 0.0,
                                    "children": {
                                        "worker_root": {
                                            "total": 497.1488135510125,
                                            "count": 18201,
                                            "is_parallel": true,
                                            "self": 232.50329652901382,
                                            "children": {
                                                "run_training.setup": {
                                                    "total": 0.0,
                                                    "count": 0,
                                                    "is_parallel": true,
                                                    "self": 0.0,
                                                    "children": {
                                                        "steps_from_proto": {
                                                            "total": 0.00660474799997246,
                                                            "count": 1,
                                                            "is_parallel": true,
                                                            "self": 0.004986788999985947,
                                                            "children": {
                                                                "_process_rank_one_or_two_observation": {
                                                                    "total": 0.001617958999986513,
                                                                    "count": 10,
                                                                    "is_parallel": true,
                                                                    "self": 0.001617958999986513
                                                                }
                                                            }
                                                        },
                                                        "UnityEnvironment.step": {
                                                            "total": 0.04762846100004481,
                                                            "count": 1,
                                                            "is_parallel": true,
                                                            "self": 0.0006828740001765254,
                                                            "children": {
                                                                "UnityEnvironment._generate_step_input": {
                                                                    "total": 0.0003280439999571172,
                                                                    "count": 1,
                                                                    "is_parallel": true,
                                                                    "self": 0.0003280439999571172
                                                                },
                                                                "communicator.exchange": {
                                                                    "total": 0.04448022199994739,
                                                                    "count": 1,
                                                                    "is_parallel": true,
                                                                    "self": 0.04448022199994739
                                                                },
                                                                "steps_from_proto": {
                                                                    "total": 0.0021373209999637766,
                                                                    "count": 1,
                                                                    "is_parallel": true,
                                                                    "self": 0.00037354499988850876,
                                                                    "children": {
                                                                        "_process_rank_one_or_two_observation": {
                                                                            "total": 0.0017637760000752678,
                                                                            "count": 10,
                                                                            "is_parallel": true,
                                                                            "self": 0.0017637760000752678
                                                                        }
                                                                    }
                                                                }
                                                            }
                                                        }
                                                    }
                                                },
                                                "UnityEnvironment.step": {
                                                    "total": 264.64551702199867,
                                                    "count": 18200,
                                                    "is_parallel": true,
                                                    "self": 11.033877337999797,
                                                    "children": {
                                                        "UnityEnvironment._generate_step_input": {
                                                            "total": 5.676095523998697,
                                                            "count": 18200,
                                                            "is_parallel": true,
                                                            "self": 5.676095523998697
                                                        },
                                                        "communicator.exchange": {
                                                            "total": 208.52117158900228,
                                                            "count": 18200,
                                                            "is_parallel": true,
                                                            "self": 208.52117158900228
                                                        },
                                                        "steps_from_proto": {
                                                            "total": 39.4143725709979,
                                                            "count": 18200,
                                                            "is_parallel": true,
                                                            "self": 7.297273763980456,
                                                            "children": {
                                                                "_process_rank_one_or_two_observation": {
                                                                    "total": 32.11709880701744,
                                                                    "count": 182000,
                                                                    "is_parallel": true,
                                                                    "self": 32.11709880701744
                                                                }
                                                            }
                                                        }
                                                    }
                                                }
                                            }
                                        }
                                    }
                                }
                            }
                        }
                    }
                },
                "trainer_threads": {
                    "total": 0.00015133300007619255,
                    "count": 1,
                    "self": 0.00015133300007619255,
                    "children": {
                        "thread_root": {
                            "total": 0.0,
                            "count": 0,
                            "is_parallel": true,
                            "self": 0.0,
                            "children": {
                                "trainer_advance": {
                                    "total": 490.0671849149844,
                                    "count": 463704,
                                    "is_parallel": true,
                                    "self": 10.994759638976802,
                                    "children": {
                                        "process_trajectory": {
                                            "total": 270.3266739380082,
                                            "count": 463704,
                                            "is_parallel": true,
                                            "self": 269.51350201900823,
                                            "children": {
                                                "RLTrainer._checkpoint": {
                                                    "total": 0.8131719189999558,
                                                    "count": 4,
                                                    "is_parallel": true,
                                                    "self": 0.8131719189999558
                                                }
                                            }
                                        },
                                        "_update_policy": {
                                            "total": 208.74575133799942,
                                            "count": 90,
                                            "is_parallel": true,
                                            "self": 81.80931167700226,
                                            "children": {
                                                "TorchPPOOptimizer.update": {
                                                    "total": 126.93643966099717,
                                                    "count": 4587,
                                                    "is_parallel": true,
                                                    "self": 126.93643966099717
                                                }
                                            }
                                        }
                                    }
                                }
                            }
                        }
                    }
                },
                "TrainerController._save_models": {
                    "total": 0.1467910240000947,
                    "count": 1,
                    "self": 0.000872205000177928,
                    "children": {
                        "RLTrainer._checkpoint": {
                            "total": 0.14591881899991677,
                            "count": 1,
                            "self": 0.14591881899991677
                        }
                    }
                }
            }
        }
    }
}
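As a quick way to read a log like this, the sketch below loads the JSON and prints each gauge (latest value plus its min/max over the recorded summary writes) and then walks the nested timer tree (total seconds and call count per node, children indented under their parent). This is only an illustrative snippet, not part of the training run; the file path is an assumption based on the `--run-id=SnowballTarget1` argument above and may need adjusting to wherever your run stored `run_logs/timers.json`.

```python
import json

def walk_timers(node, name="root", depth=0):
    """Recursively print the nested timer tree: total seconds and call count."""
    total = node.get("total", 0.0)
    count = node.get("count", 0)
    print(f"{'  ' * depth}{name}: {total:.3f}s over {count} call(s)")
    for child_name, child in node.get("children", {}).items():
        walk_timers(child, child_name, depth + 1)

# Assumed location of the log shown above; adjust to your own results folder.
with open("results/SnowballTarget1/run_logs/timers.json") as f:
    data = json.load(f)

# Each gauge entry holds the most recent value plus min/max across `count` summary writes.
for gauge_name, stats in data["gauges"].items():
    print(f"{gauge_name}: value={stats['value']:.4f} "
          f"(min={stats['min']:.4f}, max={stats['max']:.4f}, n={stats['count']})")

# The timer tree starts at the root object itself (total/count/self/children).
walk_timers(data)
```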