AMI0x's picture
500K Step Training Agent
68b7591
raw
history blame contribute delete
No virus
18.5 kB
{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.6655184626579285,
"min": 0.617180585861206,
"max": 0.9582071900367737,
"count": 30
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 6420.2568359375,
"min": 5858.89501953125,
"max": 9813.0,
"count": 30
},
"SnowballTarget.Step.mean": {
"value": 499952.0,
"min": 209936.0,
"max": 499952.0,
"count": 30
},
"SnowballTarget.Step.sum": {
"value": 499952.0,
"min": 209936.0,
"max": 499952.0,
"count": 30
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 13.066350936889648,
"min": 11.845895767211914,
"max": 13.117155075073242,
"count": 30
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2665.53564453125,
"min": 2291.858642578125,
"max": 2673.39697265625,
"count": 30
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 30
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 10945.0,
"min": 8756.0,
"max": 10945.0,
"count": 30
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06782656824045048,
"min": 0.061637699475635174,
"max": 0.07558078284441852,
"count": 30
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.3391328412022524,
"min": 0.2465507979025407,
"max": 0.3779039142220926,
"count": 30
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.16957302873625474,
"min": 0.15925985475381216,
"max": 0.19927547601800338,
"count": 30
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.8478651436812737,
"min": 0.6766528339654792,
"max": 0.9778521423246346,
"count": 30
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 3.1056989647999945e-06,
"min": 3.1056989647999945e-06,
"max": 0.00017668564110480002,
"count": 30
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 1.5528494823999972e-05,
"min": 1.5528494823999972e-05,
"max": 0.000853728215424,
"count": 30
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10103520000000002,
"min": 0.10103520000000002,
"max": 0.15889520000000001,
"count": 30
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.5051760000000001,
"min": 0.4120608,
"max": 0.7845760000000002,
"count": 30
},
"SnowballTarget.Policy.Beta.mean": {
"value": 6.165647999999992e-05,
"min": 6.165647999999992e-05,
"max": 0.002948870480000001,
"count": 30
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0003082823999999996,
"min": 0.0003082823999999996,
"max": 0.0142503424,
"count": 30
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 26.01818181818182,
"min": 23.272727272727273,
"max": 26.01818181818182,
"count": 30
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1431.0,
"min": 1033.0,
"max": 1431.0,
"count": 30
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 26.01818181818182,
"min": 23.272727272727273,
"max": 26.01818181818182,
"count": 30
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1431.0,
"min": 1033.0,
"max": 1431.0,
"count": 30
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 30
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 30
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1682358144",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics --resume",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1682358916"
},
"total": 771.782464225,
"count": 1,
"self": 0.42619025900012275,
"children": {
"run_training.setup": {
"total": 0.12090538699999342,
"count": 1,
"self": 0.12090538699999342
},
"TrainerController.start_learning": {
"total": 771.2353685789999,
"count": 1,
"self": 0.9749921189966244,
"children": {
"TrainerController._reset_env": {
"total": 3.78697043499983,
"count": 1,
"self": 3.78697043499983
},
"TrainerController.advance": {
"total": 766.3194630150033,
"count": 27277,
"self": 0.4873033090048011,
"children": {
"env_step": {
"total": 765.8321597059985,
"count": 27277,
"self": 560.3379320839692,
"children": {
"SubprocessEnvManager._take_step": {
"total": 205.00281449902013,
"count": 27277,
"self": 2.854428264005719,
"children": {
"TorchPolicy.evaluate": {
"total": 202.1483862350144,
"count": 27277,
"self": 202.1483862350144
}
}
},
"workers": {
"total": 0.4914131230091243,
"count": 27277,
"self": 0.0,
"children": {
"worker_root": {
"total": 768.7621924489877,
"count": 27277,
"is_parallel": true,
"self": 348.3900992389922,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0020266750000246248,
"count": 1,
"is_parallel": true,
"self": 0.000663230999634834,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013634440003897907,
"count": 10,
"is_parallel": true,
"self": 0.0013634440003897907
}
}
},
"UnityEnvironment.step": {
"total": 0.03653779000001123,
"count": 1,
"is_parallel": true,
"self": 0.000623500000074273,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00035205000017413113,
"count": 1,
"is_parallel": true,
"self": 0.00035205000017413113
},
"communicator.exchange": {
"total": 0.033465286999899035,
"count": 1,
"is_parallel": true,
"self": 0.033465286999899035
},
"steps_from_proto": {
"total": 0.002096952999863788,
"count": 1,
"is_parallel": true,
"self": 0.00044193199937581085,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016550210004879773,
"count": 10,
"is_parallel": true,
"self": 0.0016550210004879773
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 420.37209320999546,
"count": 27276,
"is_parallel": true,
"self": 16.572916214011002,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 9.062697829995159,
"count": 27276,
"is_parallel": true,
"self": 9.062697829995159
},
"communicator.exchange": {
"total": 341.0861296830112,
"count": 27276,
"is_parallel": true,
"self": 341.0861296830112
},
"steps_from_proto": {
"total": 53.65034948297807,
"count": 27276,
"is_parallel": true,
"self": 11.027815503917736,
"children": {
"_process_rank_one_or_two_observation": {
"total": 42.622533979060336,
"count": 272760,
"is_parallel": true,
"self": 42.622533979060336
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 6.132499993327656e-05,
"count": 1,
"self": 6.132499993327656e-05,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 760.2009560629922,
"count": 695956,
"is_parallel": true,
"self": 18.176444329957803,
"children": {
"process_trajectory": {
"total": 419.70010413003524,
"count": 695956,
"is_parallel": true,
"self": 417.8537270100355,
"children": {
"RLTrainer._checkpoint": {
"total": 1.8463771199997154,
"count": 6,
"is_parallel": true,
"self": 1.8463771199997154
}
}
},
"_update_policy": {
"total": 322.32440760299914,
"count": 136,
"is_parallel": true,
"self": 115.05255194398,
"children": {
"TorchPPOOptimizer.update": {
"total": 207.27185565901914,
"count": 6930,
"is_parallel": true,
"self": 207.27185565901914
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.15388168500021493,
"count": 1,
"self": 0.002220347000275069,
"children": {
"RLTrainer._checkpoint": {
"total": 0.15166133799993986,
"count": 1,
"self": 0.15166133799993986
}
}
}
}
}
}
}