{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.5489891171455383,
"min": 0.5391390919685364,
"max": 2.871898889541626,
"count": 80
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 5235.708984375,
"min": 5150.0263671875,
"max": 29442.70703125,
"count": 80
},
"SnowballTarget.Step.mean": {
"value": 799944.0,
"min": 9952.0,
"max": 799944.0,
"count": 80
},
"SnowballTarget.Step.sum": {
"value": 799944.0,
"min": 9952.0,
"max": 799944.0,
"count": 80
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.8124263286590576,
"min": 0.18106575310230255,
"max": 2.8432693481445312,
"count": 80
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 545.6107177734375,
"min": 35.12675476074219,
"max": 579.7280883789062,
"count": 80
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 80
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 80
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06329568701895529,
"min": 0.06084093116169599,
"max": 0.080345641567107,
"count": 80
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.25318274807582114,
"min": 0.24482596428567252,
"max": 0.38094423504491504,
"count": 80
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.11787128579967163,
"min": 0.06393787799991996,
"max": 0.16546367836349152,
"count": 80
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.4714851431986865,
"min": 0.2557515119996798,
"max": 0.8273183918174576,
"count": 80
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 1.795599401499997e-06,
"min": 1.795599401499997e-06,
"max": 0.0002979705006765,
"count": 80
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 7.182397605999988e-06,
"min": 7.182397605999988e-06,
"max": 0.0014712900095699996,
"count": 80
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10059850000000001,
"min": 0.10059850000000001,
"max": 0.19932350000000001,
"count": 80
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.40239400000000003,
"min": 0.40239400000000003,
"max": 0.9904299999999999,
"count": 80
},
"SnowballTarget.Policy.Beta.mean": {
"value": 3.9865149999999955e-05,
"min": 3.9865149999999955e-05,
"max": 0.00496624265,
"count": 80
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.00015946059999999982,
"min": 0.00015946059999999982,
"max": 0.024522457,
"count": 80
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 27.34090909090909,
"min": 3.3636363636363638,
"max": 27.977272727272727,
"count": 80
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1203.0,
"min": 148.0,
"max": 1530.0,
"count": 80
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 27.34090909090909,
"min": 3.3636363636363638,
"max": 27.977272727272727,
"count": 80
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1203.0,
"min": 148.0,
"max": 1530.0,
"count": 80
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 80
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 80
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1679807899",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics --force",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1679809763"
},
"total": 1863.9970559430003,
"count": 1,
"self": 0.426549027999954,
"children": {
"run_training.setup": {
"total": 0.10871013500036497,
"count": 1,
"self": 0.10871013500036497
},
"TrainerController.start_learning": {
"total": 1863.46179678,
"count": 1,
"self": 2.148878893890469,
"children": {
"TrainerController._reset_env": {
"total": 7.154708530999869,
"count": 1,
"self": 7.154708530999869
},
"TrainerController.advance": {
"total": 1854.0224671311094,
"count": 72741,
"self": 1.0669026770256096,
"children": {
"env_step": {
"total": 1852.9555644540837,
"count": 72741,
"self": 1337.2729332359859,
"children": {
"SubprocessEnvManager._take_step": {
"total": 514.6313742150601,
"count": 72741,
"self": 7.753451677128851,
"children": {
"TorchPolicy.evaluate": {
"total": 506.8779225379312,
"count": 72741,
"self": 506.8779225379312
}
}
},
"workers": {
"total": 1.0512570030377901,
"count": 72741,
"self": 0.0,
"children": {
"worker_root": {
"total": 1857.5596979179281,
"count": 72741,
"is_parallel": true,
"self": 857.4346367469634,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.00507375500001217,
"count": 1,
"is_parallel": true,
"self": 0.0007800979997227842,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.004293657000289386,
"count": 10,
"is_parallel": true,
"self": 0.004293657000289386
}
}
},
"UnityEnvironment.step": {
"total": 0.03627138899992133,
"count": 1,
"is_parallel": true,
"self": 0.0005611309993582836,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0002928580001935188,
"count": 1,
"is_parallel": true,
"self": 0.0002928580001935188
},
"communicator.exchange": {
"total": 0.03341232900038449,
"count": 1,
"is_parallel": true,
"self": 0.03341232900038449
},
"steps_from_proto": {
"total": 0.002005070999985037,
"count": 1,
"is_parallel": true,
"self": 0.0004571039985421521,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015479670014428848,
"count": 10,
"is_parallel": true,
"self": 0.0015479670014428848
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1000.1250611709647,
"count": 72740,
"is_parallel": true,
"self": 38.31052930991882,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 21.38248344702015,
"count": 72740,
"is_parallel": true,
"self": 21.38248344702015
},
"communicator.exchange": {
"total": 811.4177478579913,
"count": 72740,
"is_parallel": true,
"self": 811.4177478579913
},
"steps_from_proto": {
"total": 129.0143005560344,
"count": 72740,
"is_parallel": true,
"self": 25.275552665940722,
"children": {
"_process_rank_one_or_two_observation": {
"total": 103.73874789009369,
"count": 727400,
"is_parallel": true,
"self": 103.73874789009369
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00011620300028880592,
"count": 1,
"self": 0.00011620300028880592,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 1840.4237638809386,
"count": 1656742,
"is_parallel": true,
"self": 39.9274553978089,
"children": {
"process_trajectory": {
"total": 1020.404982761132,
"count": 1656742,
"is_parallel": true,
"self": 1016.1123666211315,
"children": {
"RLTrainer._checkpoint": {
"total": 4.2926161400005185,
"count": 16,
"is_parallel": true,
"self": 4.2926161400005185
}
}
},
"_update_policy": {
"total": 780.0913257219977,
"count": 363,
"is_parallel": true,
"self": 290.21580983597596,
"children": {
"TorchPPOOptimizer.update": {
"total": 489.87551588602173,
"count": 18504,
"is_parallel": true,
"self": 489.87551588602173
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.1356260210000073,
"count": 1,
"self": 0.0012752570000884589,
"children": {
"RLTrainer._checkpoint": {
"total": 0.13435076399991885,
"count": 1,
"self": 0.13435076399991885
}
}
}
}
}
}
}