{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 1.3880797624588013,
"min": 1.3880797624588013,
"max": 2.8517308235168457,
"count": 13
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 14306.9375,
"min": 13513.2646484375,
"max": 29204.57421875,
"count": 13
},
"SnowballTarget.Step.mean": {
"value": 129992.0,
"min": 9952.0,
"max": 129992.0,
"count": 13
},
"SnowballTarget.Step.sum": {
"value": 129992.0,
"min": 9952.0,
"max": 129992.0,
"count": 13
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 10.128174781799316,
"min": 0.2832825779914856,
"max": 10.128174781799316,
"count": 13
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2076.27587890625,
"min": 54.956817626953125,
"max": 2076.27587890625,
"count": 13
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 13
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 10945.0,
"min": 8756.0,
"max": 10945.0,
"count": 13
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06962355011389318,
"min": 0.06357419257832339,
"max": 0.07463602524347188,
"count": 13
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.34811775056946587,
"min": 0.2603094176510258,
"max": 0.3731801262173594,
"count": 13
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.22773249371379028,
"min": 0.11697891320321052,
"max": 0.2781759425705554,
"count": 13
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 1.1386624685689515,
"min": 0.4679156528128421,
"max": 1.390879712852777,
"count": 13
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 0.00011203206265599998,
"min": 0.00011203206265599998,
"max": 0.000291882002706,
"count": 13
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 0.0005601603132799999,
"min": 0.000507528230824,
"max": 0.00138516003828,
"count": 13
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.13734400000000002,
"min": 0.13734400000000002,
"max": 0.19729400000000002,
"count": 13
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.6867200000000001,
"min": 0.569176,
"max": 0.96172,
"count": 13
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0018734656000000006,
"min": 0.0018734656000000006,
"max": 0.0048649706,
"count": 13
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.009367328000000003,
"min": 0.008481882400000001,
"max": 0.023089828,
"count": 13
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 21.509090909090908,
"min": 3.4318181818181817,
"max": 21.509090909090908,
"count": 13
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1183.0,
"min": 151.0,
"max": 1183.0,
"count": 13
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 21.509090909090908,
"min": 3.4318181818181817,
"max": 21.509090909090908,
"count": 13
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1183.0,
"min": 151.0,
"max": 1183.0,
"count": 13
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 13
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 13
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1713175591",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1713175922"
},
"total": 331.8479391139999,
"count": 1,
"self": 0.014853138999910698,
"children": {
"run_training.setup": {
"total": 0.06150647200001913,
"count": 1,
"self": 0.06150647200001913
},
"TrainerController.start_learning": {
"total": 331.771579503,
"count": 1,
"self": 1.3985338750021583,
"children": {
"TrainerController._reset_env": {
"total": 3.6751574590000473,
"count": 1,
"self": 3.6751574590000473
},
"TrainerController.advance": {
"total": 326.5891922509978,
"count": 12198,
"self": 0.21753435899802298,
"children": {
"env_step": {
"total": 326.3716578919998,
"count": 12198,
"self": 212.15058538000767,
"children": {
"SubprocessEnvManager._take_step": {
"total": 113.99187090299517,
"count": 12198,
"self": 1.1206303279833492,
"children": {
"TorchPolicy.evaluate": {
"total": 112.87124057501183,
"count": 12198,
"self": 112.87124057501183
}
}
},
"workers": {
"total": 0.2292016089969593,
"count": 12197,
"self": 0.0,
"children": {
"worker_root": {
"total": 329.7804401870036,
"count": 12197,
"is_parallel": true,
"self": 162.73411530801604,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005380938000030255,
"count": 1,
"is_parallel": true,
"self": 0.0036088850000624006,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0017720529999678547,
"count": 10,
"is_parallel": true,
"self": 0.0017720529999678547
}
}
},
"UnityEnvironment.step": {
"total": 0.04277813399994557,
"count": 1,
"is_parallel": true,
"self": 0.0007616190000589995,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004443739999260288,
"count": 1,
"is_parallel": true,
"self": 0.0004443739999260288
},
"communicator.exchange": {
"total": 0.03936325899996973,
"count": 1,
"is_parallel": true,
"self": 0.03936325899996973
},
"steps_from_proto": {
"total": 0.0022088819999908083,
"count": 1,
"is_parallel": true,
"self": 0.000457378000078279,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0017515039999125293,
"count": 10,
"is_parallel": true,
"self": 0.0017515039999125293
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 167.04632487898755,
"count": 12196,
"is_parallel": true,
"self": 7.698381415986773,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 4.00134046099754,
"count": 12196,
"is_parallel": true,
"self": 4.00134046099754
},
"communicator.exchange": {
"total": 130.64722692800422,
"count": 12196,
"is_parallel": true,
"self": 130.64722692800422
},
"steps_from_proto": {
"total": 24.69937607399902,
"count": 12196,
"is_parallel": true,
"self": 4.812352262992249,
"children": {
"_process_rank_one_or_two_observation": {
"total": 19.88702381100677,
"count": 121960,
"is_parallel": true,
"self": 19.88702381100677
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.001165099999980157,
"count": 1,
"self": 0.001165099999980157,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 323.2787261299071,
"count": 508865,
"is_parallel": true,
"self": 11.001617000848796,
"children": {
"process_trajectory": {
"total": 180.47398547505838,
"count": 508865,
"is_parallel": true,
"self": 180.12943518305838,
"children": {
"RLTrainer._checkpoint": {
"total": 0.3445502920000081,
"count": 2,
"is_parallel": true,
"self": 0.3445502920000081
}
}
},
"_update_policy": {
"total": 131.80312365399993,
"count": 61,
"is_parallel": true,
"self": 38.18519688700246,
"children": {
"TorchPPOOptimizer.update": {
"total": 93.61792676699747,
"count": 3108,
"is_parallel": true,
"self": 93.61792676699747
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.10753081799998654,
"count": 1,
"self": 0.0016270780001832463,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10590373999980329,
"count": 1,
"self": 0.10590373999980329
}
}
}
}
}
}
}
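
Note: the JSON above is the run_logs timers file written by mlagents-learn for the SnowballTarget1 run recorded in its "metadata" block. As a minimal sketch for inspecting it locally (the filename "timers.json" below is an assumption about where the file was saved, not part of the original run):

# Sketch: load the timers file and summarize it with only the Python standard library.
# Assumes the JSON shown above was saved locally as "timers.json".
import json

with open("timers.json") as f:
    timers = json.load(f)

# Each gauge records the latest value plus min/max over `count` summary writes.
for name, gauge in timers["gauges"].items():
    print(f"{name}: last={gauge['value']:.4f} "
          f"min={gauge['min']:.4f} max={gauge['max']:.4f} (n={gauge['count']})")

# The nested "children" entries form a timing tree of cumulative wall-clock seconds.
print("total training wall clock (s):", timers["total"])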