{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.7248296737670898,
"min": 0.6775791049003601,
"max": 2.8754336833953857,
"count": 60
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 7367.1689453125,
"min": 6802.60693359375,
"max": 29478.9453125,
"count": 60
},
"SnowballTarget.Step.mean": {
"value": 599936.0,
"min": 9952.0,
"max": 599936.0,
"count": 60
},
"SnowballTarget.Step.sum": {
"value": 599936.0,
"min": 9952.0,
"max": 599936.0,
"count": 60
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 13.834306716918945,
"min": 0.29210716485977173,
"max": 13.961349487304688,
"count": 60
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2683.85546875,
"min": 56.668792724609375,
"max": 2850.8193359375,
"count": 60
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 60
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 60
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06631343917529996,
"min": 0.06228988266529461,
"max": 0.07569344408739376,
"count": 60
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2652537567011998,
"min": 0.24915953066117844,
"max": 0.3784672204369688,
"count": 60
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.15818140031222036,
"min": 0.09367230314505744,
"max": 0.2929079496100837,
"count": 60
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.6327256012488814,
"min": 0.37468921258022975,
"max": 1.323321575043248,
"count": 60
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 2.4940991686666686e-06,
"min": 2.4940991686666686e-06,
"max": 0.00029729400090199997,
"count": 60
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 9.976396674666675e-06,
"min": 9.976396674666675e-06,
"max": 0.00146172001276,
"count": 60
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10083133333333334,
"min": 0.10083133333333334,
"max": 0.19909800000000002,
"count": 60
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.40332533333333337,
"min": 0.40332533333333337,
"max": 0.9872400000000001,
"count": 60
},
"SnowballTarget.Policy.Beta.mean": {
"value": 5.148353333333336e-05,
"min": 5.148353333333336e-05,
"max": 0.0049549902,
"count": 60
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.00020593413333333345,
"min": 0.00020593413333333345,
"max": 0.024363276,
"count": 60
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 27.022727272727273,
"min": 2.772727272727273,
"max": 27.327272727272728,
"count": 60
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1189.0,
"min": 122.0,
"max": 1503.0,
"count": 60
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 27.022727272727273,
"min": 2.772727272727273,
"max": 27.327272727272728,
"count": 60
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1189.0,
"min": 122.0,
"max": 1503.0,
"count": 60
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 60
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 60
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1679314522",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1679315886"
},
"total": 1364.0446387240002,
"count": 1,
"self": 0.4338314750000336,
"children": {
"run_training.setup": {
"total": 0.11963858699999719,
"count": 1,
"self": 0.11963858699999719
},
"TrainerController.start_learning": {
"total": 1363.491168662,
"count": 1,
"self": 1.7064734049852177,
"children": {
"TrainerController._reset_env": {
"total": 9.982239085999993,
"count": 1,
"self": 9.982239085999993
},
"TrainerController.advance": {
"total": 1351.672203885015,
"count": 54601,
"self": 0.8388275130123475,
"children": {
"env_step": {
"total": 1350.8333763720027,
"count": 54601,
"self": 980.8229017299849,
"children": {
"SubprocessEnvManager._take_step": {
"total": 369.20415079601764,
"count": 54601,
"self": 6.335727415021495,
"children": {
"TorchPolicy.evaluate": {
"total": 362.86842338099615,
"count": 54601,
"self": 362.86842338099615
}
}
},
"workers": {
"total": 0.8063238460001685,
"count": 54601,
"self": 0.0,
"children": {
"worker_root": {
"total": 1359.0819031600113,
"count": 54601,
"is_parallel": true,
"self": 637.0596981149951,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005475850000010496,
"count": 1,
"is_parallel": true,
"self": 0.00373308699994368,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001742763000066816,
"count": 10,
"is_parallel": true,
"self": 0.001742763000066816
}
}
},
"UnityEnvironment.step": {
"total": 0.035366794999987405,
"count": 1,
"is_parallel": true,
"self": 0.0005652750000137985,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0002833429999782311,
"count": 1,
"is_parallel": true,
"self": 0.0002833429999782311
},
"communicator.exchange": {
"total": 0.03272804499999893,
"count": 1,
"is_parallel": true,
"self": 0.03272804499999893
},
"steps_from_proto": {
"total": 0.0017901319999964471,
"count": 1,
"is_parallel": true,
"self": 0.00037849499994990765,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014116370000465395,
"count": 10,
"is_parallel": true,
"self": 0.0014116370000465395
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 722.0222050450162,
"count": 54600,
"is_parallel": true,
"self": 28.610750178040803,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 15.462808761952402,
"count": 54600,
"is_parallel": true,
"self": 15.462808761952402
},
"communicator.exchange": {
"total": 585.9845247029803,
"count": 54600,
"is_parallel": true,
"self": 585.9845247029803
},
"steps_from_proto": {
"total": 91.96412140204285,
"count": 54600,
"is_parallel": true,
"self": 18.332462579085444,
"children": {
"_process_rank_one_or_two_observation": {
"total": 73.6316588229574,
"count": 546000,
"is_parallel": true,
"self": 73.6316588229574
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00035181799989914,
"count": 1,
"self": 0.00035181799989914,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 1342.3251433950538,
"count": 1186847,
"is_parallel": true,
"self": 28.085934761043745,
"children": {
"process_trajectory": {
"total": 740.5974856450091,
"count": 1186847,
"is_parallel": true,
"self": 736.9875699670091,
"children": {
"RLTrainer._checkpoint": {
"total": 3.609915677999993,
"count": 12,
"is_parallel": true,
"self": 3.609915677999993
}
}
},
"_update_policy": {
"total": 573.6417229890011,
"count": 272,
"is_parallel": true,
"self": 210.1120015600037,
"children": {
"TorchPPOOptimizer.update": {
"total": 363.5297214289974,
"count": 13869,
"is_parallel": true,
"self": 363.5297214289974
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.1299004679999598,
"count": 1,
"self": 0.0009268559999782156,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12897361199998159,
"count": 1,
"self": 0.12897361199998159
}
}
}
}
}
}
}