{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.7947409749031067,
"min": 0.7947409749031067,
"max": 2.8620705604553223,
"count": 40
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 8077.7470703125,
"min": 7707.5380859375,
"max": 29373.4296875,
"count": 40
},
"SnowballTarget.Step.mean": {
"value": 399992.0,
"min": 9952.0,
"max": 399992.0,
"count": 40
},
"SnowballTarget.Step.sum": {
"value": 399992.0,
"min": 9952.0,
"max": 399992.0,
"count": 40
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 13.424864768981934,
"min": 0.30811837315559387,
"max": 13.424864768981934,
"count": 40
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2617.8486328125,
"min": 59.77496337890625,
"max": 2741.96337890625,
"count": 40
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 40
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 40
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06454560666472889,
"min": 0.06074054387920792,
"max": 0.074167566525327,
"count": 40
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.25818242665891555,
"min": 0.24421002876524833,
"max": 0.370837832626635,
"count": 40
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.20385289732732026,
"min": 0.14457374546270563,
"max": 0.27931074681235296,
"count": 40
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.815411589309281,
"min": 0.5782949818508225,
"max": 1.3713825865119111,
"count": 40
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 2.5940987029999965e-06,
"min": 2.5940987029999965e-06,
"max": 0.000197294001353,
"count": 40
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 1.0376394811999986e-05,
"min": 1.0376394811999986e-05,
"max": 0.00096172001914,
"count": 40
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.101297,
"min": 0.101297,
"max": 0.19864700000000002,
"count": 40
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.405188,
"min": 0.405188,
"max": 0.98086,
"count": 40
},
"SnowballTarget.Policy.Beta.mean": {
"value": 7.472029999999992e-05,
"min": 7.472029999999992e-05,
"max": 0.004932485299999999,
"count": 40
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.00029888119999999966,
"min": 0.00029888119999999966,
"max": 0.024044914,
"count": 40
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 26.431818181818183,
"min": 3.8181818181818183,
"max": 26.431818181818183,
"count": 40
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1163.0,
"min": 168.0,
"max": 1449.0,
"count": 40
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 26.431818181818183,
"min": 3.8181818181818183,
"max": 26.431818181818183,
"count": 40
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1163.0,
"min": 168.0,
"max": 1449.0,
"count": 40
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1723831439",
"python_version": "3.10.12 (main, Jul 29 2024, 16:56:48) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.3.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1723832665"
},
"total": 1225.6518520080003,
"count": 1,
"self": 0.5952232290005668,
"children": {
"run_training.setup": {
"total": 0.09091497700001128,
"count": 1,
"self": 0.09091497700001128
},
"TrainerController.start_learning": {
"total": 1224.9657138019998,
"count": 1,
"self": 1.6150285529806752,
"children": {
"TrainerController._reset_env": {
"total": 4.874541996000062,
"count": 1,
"self": 4.874541996000062
},
"TrainerController.advance": {
"total": 1218.3798316620187,
"count": 36400,
"self": 0.8544763460145077,
"children": {
"env_step": {
"total": 1217.5253553160042,
"count": 36400,
"self": 939.4780412090043,
"children": {
"SubprocessEnvManager._take_step": {
"total": 277.20442027199715,
"count": 36400,
"self": 4.874734447951596,
"children": {
"TorchPolicy.evaluate": {
"total": 272.32968582404555,
"count": 36400,
"self": 272.32968582404555
}
}
},
"workers": {
"total": 0.8428938350026556,
"count": 36400,
"self": 0.0,
"children": {
"worker_root": {
"total": 1220.97974874999,
"count": 36400,
"is_parallel": true,
"self": 588.2428607999832,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.008710908000011841,
"count": 1,
"is_parallel": true,
"self": 0.00565206499982196,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0030588430001898814,
"count": 10,
"is_parallel": true,
"self": 0.0030588430001898814
}
}
},
"UnityEnvironment.step": {
"total": 0.04635482699995919,
"count": 1,
"is_parallel": true,
"self": 0.0008227309999710997,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004527680000592227,
"count": 1,
"is_parallel": true,
"self": 0.0004527680000592227
},
"communicator.exchange": {
"total": 0.04247283899997001,
"count": 1,
"is_parallel": true,
"self": 0.04247283899997001
},
"steps_from_proto": {
"total": 0.002606488999958856,
"count": 1,
"is_parallel": true,
"self": 0.0004839759997139481,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0021225130002449077,
"count": 10,
"is_parallel": true,
"self": 0.0021225130002449077
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 632.7368879500068,
"count": 36399,
"is_parallel": true,
"self": 30.229353984987824,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 15.235371787013605,
"count": 36399,
"is_parallel": true,
"self": 15.235371787013605
},
"communicator.exchange": {
"total": 497.27862538700526,
"count": 36399,
"is_parallel": true,
"self": 497.27862538700526
},
"steps_from_proto": {
"total": 89.99353679100011,
"count": 36399,
"is_parallel": true,
"self": 17.680752999021934,
"children": {
"_process_rank_one_or_two_observation": {
"total": 72.31278379197818,
"count": 363990,
"is_parallel": true,
"self": 72.31278379197818
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00042596899993441184,
"count": 1,
"self": 0.00042596899993441184,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 1204.2508206179234,
"count": 1564804,
"is_parallel": true,
"self": 37.052642806129825,
"children": {
"process_trajectory": {
"total": 638.6995964737937,
"count": 1564804,
"is_parallel": true,
"self": 636.6551229837937,
"children": {
"RLTrainer._checkpoint": {
"total": 2.044473489999973,
"count": 8,
"is_parallel": true,
"self": 2.044473489999973
}
}
},
"_update_policy": {
"total": 528.4985813379999,
"count": 181,
"is_parallel": true,
"self": 148.3633733199904,
"children": {
"TorchPPOOptimizer.update": {
"total": 380.1352080180095,
"count": 9228,
"is_parallel": true,
"self": 380.1352080180095
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.09588562200042361,
"count": 1,
"self": 0.0013083770004413964,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09457724499998221,
"count": 1,
"self": 0.09457724499998221
}
}
}
}
}
}
}