{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 1.5867141485214233,
"min": 1.5867141485214233,
"max": 2.8526248931884766,
"count": 10
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 16301.9013671875,
"min": 16180.974609375,
"max": 29213.732421875,
"count": 10
},
"SnowballTarget.Step.mean": {
"value": 99960.0,
"min": 9952.0,
"max": 99960.0,
"count": 10
},
"SnowballTarget.Step.sum": {
"value": 99960.0,
"min": 9952.0,
"max": 99960.0,
"count": 10
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.9726896286010742,
"min": 0.14369924366474152,
"max": 0.9726896286010742,
"count": 10
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 198.42868041992188,
"min": 27.877653121948242,
"max": 198.42868041992188,
"count": 10
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 10
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 10945.0,
"min": 8756.0,
"max": 10945.0,
"count": 10
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.07226602769326693,
"min": 0.06125383561943725,
"max": 0.07226602769326693,
"count": 10
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.3613301384663346,
"min": 0.245015342477749,
"max": 0.3613301384663346,
"count": 10
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.12289813082007801,
"min": 0.04514339138689798,
"max": 0.12289813082007801,
"count": 10
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.61449065410039,
"min": 0.1805735655475919,
"max": 0.61449065410039,
"count": 10
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 1.6464094511999996e-05,
"min": 1.6464094511999996e-05,
"max": 0.000283764005412,
"count": 10
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 8.232047255999999e-05,
"min": 8.232047255999999e-05,
"max": 0.00127032007656,
"count": 10
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.105488,
"min": 0.105488,
"max": 0.194588,
"count": 10
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.52744,
"min": 0.4615520000000001,
"max": 0.92344,
"count": 10
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0002838512,
"min": 0.0002838512,
"max": 0.0047299412,
"count": 10
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.001419256,
"min": 0.001419256,
"max": 0.021179656,
"count": 10
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 19.181818181818183,
"min": 3.272727272727273,
"max": 19.181818181818183,
"count": 10
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1055.0,
"min": 144.0,
"max": 1055.0,
"count": 10
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 19.181818181818183,
"min": 3.272727272727273,
"max": 19.181818181818183,
"count": 10
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1055.0,
"min": 144.0,
"max": 1055.0,
"count": 10
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 10
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 10
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1674155739",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1674155966"
},
"total": 226.50178110400003,
"count": 1,
"self": 0.3879015110000239,
"children": {
"run_training.setup": {
"total": 0.10655167800001664,
"count": 1,
"self": 0.10655167800001664
},
"TrainerController.start_learning": {
"total": 226.007327915,
"count": 1,
"self": 0.30935101900178097,
"children": {
"TrainerController._reset_env": {
"total": 9.247164024,
"count": 1,
"self": 9.247164024
},
"TrainerController.advance": {
"total": 216.32491082999826,
"count": 9136,
"self": 0.13556149299850517,
"children": {
"env_step": {
"total": 216.18934933699975,
"count": 9136,
"self": 141.77912051799944,
"children": {
"SubprocessEnvManager._take_step": {
"total": 74.27626982800098,
"count": 9136,
"self": 0.7361855020020585,
"children": {
"TorchPolicy.evaluate": {
"total": 73.54008432599892,
"count": 9136,
"self": 16.653081111001597,
"children": {
"TorchPolicy.sample_actions": {
"total": 56.88700321499732,
"count": 9136,
"self": 56.88700321499732
}
}
}
}
},
"workers": {
"total": 0.13395899099933217,
"count": 9136,
"self": 0.0,
"children": {
"worker_root": {
"total": 225.30896656700085,
"count": 9136,
"is_parallel": true,
"self": 112.12376225799937,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005523942000024817,
"count": 1,
"is_parallel": true,
"self": 0.00303321900003084,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0024907229999939773,
"count": 10,
"is_parallel": true,
"self": 0.0024907229999939773
}
}
},
"UnityEnvironment.step": {
"total": 0.03764425999997911,
"count": 1,
"is_parallel": true,
"self": 0.0006802530000129536,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.000328035999984877,
"count": 1,
"is_parallel": true,
"self": 0.000328035999984877
},
"communicator.exchange": {
"total": 0.034794521000009127,
"count": 1,
"is_parallel": true,
"self": 0.034794521000009127
},
"steps_from_proto": {
"total": 0.0018414499999721556,
"count": 1,
"is_parallel": true,
"self": 0.000439483999997492,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014019659999746636,
"count": 10,
"is_parallel": true,
"self": 0.0014019659999746636
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 113.18520430900148,
"count": 9135,
"is_parallel": true,
"self": 4.24361126900223,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 2.6212543760017297,
"count": 9135,
"is_parallel": true,
"self": 2.6212543760017297
},
"communicator.exchange": {
"total": 90.52776511899827,
"count": 9135,
"is_parallel": true,
"self": 90.52776511899827
},
"steps_from_proto": {
"total": 15.792573544999243,
"count": 9135,
"is_parallel": true,
"self": 3.3436687989969585,
"children": {
"_process_rank_one_or_two_observation": {
"total": 12.448904746002285,
"count": 91350,
"is_parallel": true,
"self": 12.448904746002285
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 4.544199998690601e-05,
"count": 1,
"self": 4.544199998690601e-05,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 214.8135843600029,
"count": 175862,
"is_parallel": true,
"self": 4.88795159101025,
"children": {
"process_trajectory": {
"total": 122.9901314249926,
"count": 175862,
"is_parallel": true,
"self": 122.57534843899262,
"children": {
"RLTrainer._checkpoint": {
"total": 0.4147829859999774,
"count": 2,
"is_parallel": true,
"self": 0.4147829859999774
}
}
},
"_update_policy": {
"total": 86.93550134400004,
"count": 45,
"is_parallel": true,
"self": 22.967537525999518,
"children": {
"TorchPPOOptimizer.update": {
"total": 63.96796381800053,
"count": 2292,
"is_parallel": true,
"self": 63.96796381800053
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.12585659999996324,
"count": 1,
"self": 0.0009197229999244882,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12493687700003875,
"count": 1,
"self": 0.12493687700003875
}
}
}
}
}
}
}