{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 1.0691092014312744,
"min": 1.0691092014312744,
"max": 1.5942479372024536,
"count": 10
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 10972.267578125,
"min": 10888.201171875,
"max": 16309.15625,
"count": 10
},
"SnowballTarget.Step.mean": {
"value": 299944.0,
"min": 209936.0,
"max": 299944.0,
"count": 10
},
"SnowballTarget.Step.sum": {
"value": 299944.0,
"min": 209936.0,
"max": 299944.0,
"count": 10
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.593055725097656,
"min": 10.063211441040039,
"max": 12.593055725097656,
"count": 10
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2568.9833984375,
"min": 1932.1365966796875,
"max": 2568.9833984375,
"count": 10
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 10
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 10945.0,
"min": 8756.0,
"max": 10945.0,
"count": 10
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.09090909090909,
"min": 21.136363636363637,
"max": 25.34090909090909,
"count": 10
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1380.0,
"min": 930.0,
"max": 1393.0,
"count": 10
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.09090909090909,
"min": 21.136363636363637,
"max": 25.34090909090909,
"count": 10
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1380.0,
"min": 930.0,
"max": 1393.0,
"count": 10
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06916270711671506,
"min": 0.05830706287302104,
"max": 0.07310536152149058,
"count": 10
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.13832541423343012,
"min": 0.11661412574604207,
"max": 0.1972248834268505,
"count": 10
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.21709369035328135,
"min": 0.20926002522601803,
"max": 0.2832611144624929,
"count": 10
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.4341873807065627,
"min": 0.4341873807065627,
"max": 0.7940560870749109,
"count": 10
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 5.376098207999998e-06,
"min": 5.376098207999998e-06,
"max": 9.337606887466668e-05,
"count": 10
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 1.0752196415999996e-05,
"min": 1.0752196415999996e-05,
"max": 0.00019432823522399996,
"count": 10
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10179200000000001,
"min": 0.10179200000000001,
"max": 0.13112533333333337,
"count": 10
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.20358400000000001,
"min": 0.20358400000000001,
"max": 0.364776,
"count": 10
},
"SnowballTarget.Policy.Beta.mean": {
"value": 9.942079999999998e-05,
"min": 9.942079999999998e-05,
"max": 0.0015631541333333333,
"count": 10
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.00019884159999999996,
"min": 0.00019884159999999996,
"max": 0.003262322400000001,
"count": 10
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 10
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 10
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1702854464",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics --resume",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.2+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1702854685"
},
"total": 220.82709319900005,
"count": 1,
"self": 0.6029564710001978,
"children": {
"run_training.setup": {
"total": 0.056017914999983986,
"count": 1,
"self": 0.056017914999983986
},
"TrainerController.start_learning": {
"total": 220.16811881299986,
"count": 1,
"self": 0.33025679398633656,
"children": {
"TrainerController._reset_env": {
"total": 2.0461344620000546,
"count": 1,
"self": 2.0461344620000546
},
"TrainerController.advance": {
"total": 217.58931440201354,
"count": 9132,
"self": 0.13580619401659533,
"children": {
"env_step": {
"total": 217.45350820799695,
"count": 9132,
"self": 148.38303226201378,
"children": {
"SubprocessEnvManager._take_step": {
"total": 68.92934167998396,
"count": 9132,
"self": 0.7219665359930332,
"children": {
"TorchPolicy.evaluate": {
"total": 68.20737514399093,
"count": 9132,
"self": 68.20737514399093
}
}
},
"workers": {
"total": 0.14113426599919876,
"count": 9132,
"self": 0.0,
"children": {
"worker_root": {
"total": 219.39355299799695,
"count": 9132,
"is_parallel": true,
"self": 107.01856439499534,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0016263719999187742,
"count": 1,
"is_parallel": true,
"self": 0.00047473800032094005,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0011516339995978342,
"count": 10,
"is_parallel": true,
"self": 0.0011516339995978342
}
}
},
"UnityEnvironment.step": {
"total": 0.03596235300005901,
"count": 1,
"is_parallel": true,
"self": 0.0006419720002668328,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004035790000216366,
"count": 1,
"is_parallel": true,
"self": 0.0004035790000216366
},
"communicator.exchange": {
"total": 0.03301259499994558,
"count": 1,
"is_parallel": true,
"self": 0.03301259499994558
},
"steps_from_proto": {
"total": 0.0019042069998249644,
"count": 1,
"is_parallel": true,
"self": 0.00038447399970209517,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015197330001228693,
"count": 10,
"is_parallel": true,
"self": 0.0015197330001228693
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 112.37498860300161,
"count": 9131,
"is_parallel": true,
"self": 5.284206426007586,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 2.6740532879891816,
"count": 9131,
"is_parallel": true,
"self": 2.6740532879891816
},
"communicator.exchange": {
"total": 87.90271529498432,
"count": 9131,
"is_parallel": true,
"self": 87.90271529498432
},
"steps_from_proto": {
"total": 16.514013594020525,
"count": 9131,
"is_parallel": true,
"self": 3.078017339043754,
"children": {
"_process_rank_one_or_two_observation": {
"total": 13.435996254976772,
"count": 91310,
"is_parallel": true,
"self": 13.435996254976772
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.0004031409998788149,
"count": 1,
"self": 0.0004031409998788149,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 213.92445111497977,
"count": 444850,
"is_parallel": true,
"self": 9.002163526009326,
"children": {
"process_trajectory": {
"total": 150.38162959797,
"count": 444850,
"is_parallel": true,
"self": 149.88418699196995,
"children": {
"RLTrainer._checkpoint": {
"total": 0.49744260600004964,
"count": 2,
"is_parallel": true,
"self": 0.49744260600004964
}
}
},
"_update_policy": {
"total": 54.540657991000444,
"count": 22,
"is_parallel": true,
"self": 17.89772584499974,
"children": {
"TorchPPOOptimizer.update": {
"total": 36.6429321460007,
"count": 1494,
"is_parallel": true,
"self": 36.6429321460007
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.20201001400005225,
"count": 1,
"self": 0.004421799000056126,
"children": {
"RLTrainer._checkpoint": {
"total": 0.19758821499999613,
"count": 1,
"self": 0.19758821499999613
}
}
}
}
}
}
}