{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.8569635152816772,
"min": 0.8569635152816772,
"max": 2.884730339050293,
"count": 40
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 4373.94189453125,
"min": 3861.56884765625,
"max": 14945.7880859375,
"count": 40
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 4976.0,
"max": 199984.0,
"count": 40
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 4976.0,
"max": 199984.0,
"count": 40
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 13.138785362243652,
"min": 0.24235770106315613,
"max": 13.178133010864258,
"count": 40
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 1287.6009521484375,
"min": 23.508697509765625,
"max": 1401.1787109375,
"count": 40
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 40
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 4378.0,
"min": 4378.0,
"max": 6567.0,
"count": 40
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06602572204542859,
"min": 0.05786793156528124,
"max": 0.07803093159443462,
"count": 40
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.13205144409085717,
"min": 0.11573586313056249,
"max": 0.23409279478330386,
"count": 40
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.1944483096400897,
"min": 0.10000296612726707,
"max": 0.28090790016394035,
"count": 40
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.3888966192801794,
"min": 0.20000593225453414,
"max": 0.7729679322710223,
"count": 40
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 4.782098406000003e-06,
"min": 4.782098406000003e-06,
"max": 0.000295182001606,
"count": 40
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 9.564196812000006e-06,
"min": 9.564196812000006e-06,
"max": 0.0008211960262680001,
"count": 40
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.101594,
"min": 0.101594,
"max": 0.19839400000000001,
"count": 40
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.203188,
"min": 0.203188,
"max": 0.573732,
"count": 40
},
"SnowballTarget.Policy.Beta.mean": {
"value": 8.954060000000007e-05,
"min": 8.954060000000007e-05,
"max": 0.0049198606,
"count": 40
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.00017908120000000014,
"min": 0.00017908120000000014,
"max": 0.013689226799999999,
"count": 40
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.40909090909091,
"min": 3.090909090909091,
"max": 26.545454545454547,
"count": 40
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 559.0,
"min": 68.0,
"max": 850.0,
"count": 40
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.40909090909091,
"min": 3.090909090909091,
"max": 26.545454545454547,
"count": 40
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 559.0,
"min": 68.0,
"max": 850.0,
"count": 40
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1704383106",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.2+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1704383567"
},
"total": 460.547715897,
"count": 1,
"self": 0.43648430000030203,
"children": {
"run_training.setup": {
"total": 0.05413135099979627,
"count": 1,
"self": 0.05413135099979627
},
"TrainerController.start_learning": {
"total": 460.0571002459999,
"count": 1,
"self": 0.577986492986156,
"children": {
"TrainerController._reset_env": {
"total": 3.2748683379998056,
"count": 1,
"self": 3.2748683379998056
},
"TrainerController.advance": {
"total": 456.11380026601387,
"count": 18199,
"self": 0.2808561130102589,
"children": {
"env_step": {
"total": 455.8329441530036,
"count": 18199,
"self": 299.418364931038,
"children": {
"SubprocessEnvManager._take_step": {
"total": 156.12534950096006,
"count": 18199,
"self": 1.4939536229703663,
"children": {
"TorchPolicy.evaluate": {
"total": 154.6313958779897,
"count": 18199,
"self": 154.6313958779897
}
}
},
"workers": {
"total": 0.28922972100554034,
"count": 18199,
"self": 0.0,
"children": {
"worker_root": {
"total": 458.76977767401104,
"count": 18199,
"is_parallel": true,
"self": 227.3082811209838,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005656270000145014,
"count": 1,
"is_parallel": true,
"self": 0.0041150920001200575,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015411780000249564,
"count": 10,
"is_parallel": true,
"self": 0.0015411780000249564
}
}
},
"UnityEnvironment.step": {
"total": 0.039041193999992174,
"count": 1,
"is_parallel": true,
"self": 0.0006756370000857714,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0003343670000504062,
"count": 1,
"is_parallel": true,
"self": 0.0003343670000504062
},
"communicator.exchange": {
"total": 0.03594256899987158,
"count": 1,
"is_parallel": true,
"self": 0.03594256899987158
},
"steps_from_proto": {
"total": 0.002088620999984414,
"count": 1,
"is_parallel": true,
"self": 0.00039119400025811046,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016974269997263036,
"count": 10,
"is_parallel": true,
"self": 0.0016974269997263036
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 231.46149655302725,
"count": 18198,
"is_parallel": true,
"self": 10.930258576057668,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.569862104984395,
"count": 18198,
"is_parallel": true,
"self": 5.569862104984395
},
"communicator.exchange": {
"total": 179.55814283699237,
"count": 18198,
"is_parallel": true,
"self": 179.55814283699237
},
"steps_from_proto": {
"total": 35.40323303499281,
"count": 18198,
"is_parallel": true,
"self": 6.5611687809609975,
"children": {
"_process_rank_one_or_two_observation": {
"total": 28.84206425403181,
"count": 181980,
"is_parallel": true,
"self": 28.84206425403181
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00011959800008298771,
"count": 1,
"self": 0.00011959800008298771,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 450.3568553629618,
"count": 677392,
"is_parallel": true,
"self": 14.859978161998924,
"children": {
"process_trajectory": {
"total": 248.14367853696172,
"count": 677392,
"is_parallel": true,
"self": 247.67221901196172,
"children": {
"RLTrainer._checkpoint": {
"total": 0.4714595250000002,
"count": 4,
"is_parallel": true,
"self": 0.4714595250000002
}
}
},
"_update_policy": {
"total": 187.35319866400118,
"count": 90,
"is_parallel": true,
"self": 60.43853757299894,
"children": {
"TorchPPOOptimizer.update": {
"total": 126.91466109100224,
"count": 4587,
"is_parallel": true,
"self": 126.91466109100224
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.09032555099997808,
"count": 1,
"self": 0.0009582889999819599,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08936726199999612,
"count": 1,
"self": 0.08936726199999612
}
}
}
}
}
}
}