{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 1.0919021368026733,
"min": 1.0919021368026733,
"max": 2.8718159198760986,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 10413.470703125,
"min": 10413.470703125,
"max": 29378.677734375,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.205342292785645,
"min": 0.32794323563575745,
"max": 12.205342292785645,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2380.041748046875,
"min": 63.62098693847656,
"max": 2453.032958984375,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.07206723039488162,
"min": 0.063662699201029,
"max": 0.07418873627290279,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2882689215795265,
"min": 0.254650796804116,
"max": 0.36990817307927426,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.19978574787576991,
"min": 0.1317103758577586,
"max": 0.2864143833079759,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.7991429915030797,
"min": 0.5268415034310344,
"max": 1.3952587837097692,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 24.045454545454547,
"min": 3.7954545454545454,
"max": 24.09090909090909,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1058.0,
"min": 167.0,
"max": 1325.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 24.045454545454547,
"min": 3.7954545454545454,
"max": 24.09090909090909,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1058.0,
"min": 167.0,
"max": 1325.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1691026063",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./ml-agents/config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1691026532"
},
"total": 468.8080965260001,
"count": 1,
"self": 0.4371717929999477,
"children": {
"run_training.setup": {
"total": 0.04401449500005583,
"count": 1,
"self": 0.04401449500005583
},
"TrainerController.start_learning": {
"total": 468.3269102380001,
"count": 1,
"self": 0.5492553209924154,
"children": {
"TrainerController._reset_env": {
"total": 6.370661489999975,
"count": 1,
"self": 6.370661489999975
},
"TrainerController.advance": {
"total": 461.2668185330075,
"count": 18199,
"self": 0.2605864760047325,
"children": {
"env_step": {
"total": 461.00623205700276,
"count": 18199,
"self": 338.1891974329907,
"children": {
"SubprocessEnvManager._take_step": {
"total": 122.5491257410057,
"count": 18199,
"self": 1.697311537996029,
"children": {
"TorchPolicy.evaluate": {
"total": 120.85181420300967,
"count": 18199,
"self": 120.85181420300967
}
}
},
"workers": {
"total": 0.2679088830063847,
"count": 18199,
"self": 0.0,
"children": {
"worker_root": {
"total": 466.7269492059912,
"count": 18199,
"is_parallel": true,
"self": 220.8572072269883,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.007014021000031789,
"count": 1,
"is_parallel": true,
"self": 0.004685324999968543,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002328696000063246,
"count": 10,
"is_parallel": true,
"self": 0.002328696000063246
}
}
},
"UnityEnvironment.step": {
"total": 0.03540059699992071,
"count": 1,
"is_parallel": true,
"self": 0.00039216599998326274,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0003667629999881683,
"count": 1,
"is_parallel": true,
"self": 0.0003667629999881683
},
"communicator.exchange": {
"total": 0.03251359099999718,
"count": 1,
"is_parallel": true,
"self": 0.03251359099999718
},
"steps_from_proto": {
"total": 0.0021280769999521,
"count": 1,
"is_parallel": true,
"self": 0.0003943170000866303,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0017337599998654696,
"count": 10,
"is_parallel": true,
"self": 0.0017337599998654696
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 245.86974197900292,
"count": 18198,
"is_parallel": true,
"self": 10.282672206991833,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.200049795001519,
"count": 18198,
"is_parallel": true,
"self": 5.200049795001519
},
"communicator.exchange": {
"total": 195.13692453400859,
"count": 18198,
"is_parallel": true,
"self": 195.13692453400859
},
"steps_from_proto": {
"total": 35.25009544300099,
"count": 18198,
"is_parallel": true,
"self": 6.368682779991104,
"children": {
"_process_rank_one_or_two_observation": {
"total": 28.881412663009883,
"count": 181980,
"is_parallel": true,
"self": 28.881412663009883
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 3.812300019490067e-05,
"count": 1,
"self": 3.812300019490067e-05,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 457.8798561490679,
"count": 419109,
"is_parallel": true,
"self": 9.302510940054617,
"children": {
"process_trajectory": {
"total": 247.7934828320133,
"count": 419109,
"is_parallel": true,
"self": 247.10215578501345,
"children": {
"RLTrainer._checkpoint": {
"total": 0.6913270469998452,
"count": 4,
"is_parallel": true,
"self": 0.6913270469998452
}
}
},
"_update_policy": {
"total": 200.783862377,
"count": 90,
"is_parallel": true,
"self": 83.53291189899699,
"children": {
"TorchPPOOptimizer.update": {
"total": 117.250950478003,
"count": 4587,
"is_parallel": true,
"self": 117.250950478003
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.14013677100001587,
"count": 1,
"self": 0.0008861539999998058,
"children": {
"RLTrainer._checkpoint": {
"total": 0.13925061700001606,
"count": 1,
"self": 0.13925061700001606
}
}
}
}
}
}
}