{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.9268140196800232,
"min": 0.9268140196800232,
"max": 2.855933904647827,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 8839.025390625,
"min": 8839.025390625,
"max": 29279.03515625,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.571138381958008,
"min": 0.43291130661964417,
"max": 12.571138381958008,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2451.3720703125,
"min": 83.98479461669922,
"max": 2533.017578125,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06852294416703265,
"min": 0.0643664756487352,
"max": 0.07555206218708989,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2740917766681306,
"min": 0.2574659025949408,
"max": 0.37776031093544943,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.1968971065738622,
"min": 0.18861382280238995,
"max": 0.28641239387147566,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.7875884262954488,
"min": 0.7544552912095598,
"max": 1.4320619693573782,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 24.954545454545453,
"min": 4.25,
"max": 25.10909090909091,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1098.0,
"min": 187.0,
"max": 1381.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 24.954545454545453,
"min": 4.25,
"max": 25.10909090909091,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1098.0,
"min": 187.0,
"max": 1381.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1691511261",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1691511744"
},
"total": 483.1372813500002,
"count": 1,
"self": 0.4370653770001809,
"children": {
"run_training.setup": {
"total": 0.043954155000164974,
"count": 1,
"self": 0.043954155000164974
},
"TrainerController.start_learning": {
"total": 482.65626181799985,
"count": 1,
"self": 0.5749671340056466,
"children": {
"TrainerController._reset_env": {
"total": 5.76555028100006,
"count": 1,
"self": 5.76555028100006
},
"TrainerController.advance": {
"total": 476.1732656819943,
"count": 18200,
"self": 0.29236147198935214,
"children": {
"env_step": {
"total": 475.8809042100049,
"count": 18200,
"self": 346.24379427998974,
"children": {
"SubprocessEnvManager._take_step": {
"total": 129.3422109109813,
"count": 18200,
"self": 1.7552541689640293,
"children": {
"TorchPolicy.evaluate": {
"total": 127.58695674201726,
"count": 18200,
"self": 127.58695674201726
}
}
},
"workers": {
"total": 0.2948990190338918,
"count": 18200,
"self": 0.0,
"children": {
"worker_root": {
"total": 480.96523598499584,
"count": 18200,
"is_parallel": true,
"self": 227.02921690798416,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0071368840001468925,
"count": 1,
"is_parallel": true,
"self": 0.005388241000218841,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0017486429999280517,
"count": 10,
"is_parallel": true,
"self": 0.0017486429999280517
}
}
},
"UnityEnvironment.step": {
"total": 0.03755015799993089,
"count": 1,
"is_parallel": true,
"self": 0.0006115869998666312,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.000354626999978791,
"count": 1,
"is_parallel": true,
"self": 0.000354626999978791
},
"communicator.exchange": {
"total": 0.03305041500016159,
"count": 1,
"is_parallel": true,
"self": 0.03305041500016159
},
"steps_from_proto": {
"total": 0.003533528999923874,
"count": 1,
"is_parallel": true,
"self": 0.00040505199990548135,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0031284770000183926,
"count": 10,
"is_parallel": true,
"self": 0.0031284770000183926
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 253.93601907701168,
"count": 18199,
"is_parallel": true,
"self": 10.669701483980816,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.404905561010764,
"count": 18199,
"is_parallel": true,
"self": 5.404905561010764
},
"communicator.exchange": {
"total": 201.3711725210253,
"count": 18199,
"is_parallel": true,
"self": 201.3711725210253
},
"steps_from_proto": {
"total": 36.4902395109948,
"count": 18199,
"is_parallel": true,
"self": 6.753751256041369,
"children": {
"_process_rank_one_or_two_observation": {
"total": 29.73648825495343,
"count": 181990,
"is_parallel": true,
"self": 29.73648825495343
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 3.4939999977723346e-05,
"count": 1,
"self": 3.4939999977723346e-05,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 472.5716126759662,
"count": 451638,
"is_parallel": true,
"self": 10.01848530098573,
"children": {
"process_trajectory": {
"total": 258.9374339869803,
"count": 451638,
"is_parallel": true,
"self": 257.6804409699803,
"children": {
"RLTrainer._checkpoint": {
"total": 1.2569930170000134,
"count": 4,
"is_parallel": true,
"self": 1.2569930170000134
}
}
},
"_update_policy": {
"total": 203.61569338800018,
"count": 90,
"is_parallel": true,
"self": 79.83269887000642,
"children": {
"TorchPPOOptimizer.update": {
"total": 123.78299451799376,
"count": 4587,
"is_parallel": true,
"self": 123.78299451799376
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.14244378099988353,
"count": 1,
"self": 0.0009293599998727586,
"children": {
"RLTrainer._checkpoint": {
"total": 0.14151442100001077,
"count": 1,
"self": 0.14151442100001077
}
}
}
}
}
}
}
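For reference, the gauge values above can be read back programmatically once the file is downloaded from the repo. A minimal sketch, assuming the dump is saved locally as run_logs/timers.json (the local path is an assumption for illustration, not taken from the log itself):

import json

# Load the ML-Agents gauge/timer dump (local path assumed for illustration).
with open("run_logs/timers.json") as f:
    stats = json.load(f)

# "gauges" is a top-level key in this dump; pull the mean cumulative reward series summary.
reward = stats["gauges"]["SnowballTarget.Environment.CumulativeReward.mean"]
print(f"final={reward['value']:.2f}  min={reward['min']:.2f}  max={reward['max']:.2f}")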