{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 1.0974037647247314,
"min": 1.0974037647247314,
"max": 2.8498051166534424,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 10490.08203125,
"min": 10490.08203125,
"max": 29247.55078125,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.311443328857422,
"min": 0.3646710515022278,
"max": 12.311443328857422,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2400.7314453125,
"min": 70.74618530273438,
"max": 2484.01806640625,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06723358795473662,
"min": 0.06066721534786185,
"max": 0.07863817447910597,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2689343518189465,
"min": 0.2426688613914474,
"max": 0.39319087239552986,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.1967271593417607,
"min": 0.14374250991101029,
"max": 0.2900221366070065,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.7869086373670428,
"min": 0.5749700396440411,
"max": 1.365804787067806,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10422365320000002,
"min": 0.10422365320000002,
"max": 0.2525375332,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41689461280000006,
"min": 0.41689461280000006,
"max": 1.223884616,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.00022495426000000016,
"min": 0.00022495426000000016,
"max": 0.00777308826,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0008998170400000006,
"min": 0.0008998170400000006,
"max": 0.0368906388,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.068181818181817,
"min": 3.909090909090909,
"max": 25.068181818181817,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1103.0,
"min": 172.0,
"max": 1336.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.068181818181817,
"min": 3.909090909090909,
"max": 25.068181818181817,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1103.0,
"min": 172.0,
"max": 1336.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1681551742",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1681552209"
},
"total": 467.1495823750001,
"count": 1,
"self": 0.38718284200007247,
"children": {
"run_training.setup": {
"total": 0.11083301000002166,
"count": 1,
"self": 0.11083301000002166
},
"TrainerController.start_learning": {
"total": 466.651566523,
"count": 1,
"self": 0.5603837689976103,
"children": {
"TrainerController._reset_env": {
"total": 4.845604797000021,
"count": 1,
"self": 4.845604797000021
},
"TrainerController.advance": {
"total": 461.11158312900244,
"count": 18202,
"self": 0.27578611501104433,
"children": {
"env_step": {
"total": 460.8357970139914,
"count": 18202,
"self": 340.3423669199844,
"children": {
"SubprocessEnvManager._take_step": {
"total": 120.222752863003,
"count": 18202,
"self": 1.7590816070042479,
"children": {
"TorchPolicy.evaluate": {
"total": 118.46367125599875,
"count": 18202,
"self": 118.46367125599875
}
}
},
"workers": {
"total": 0.2706772310040151,
"count": 18202,
"self": 0.0,
"children": {
"worker_root": {
"total": 465.1455494600033,
"count": 18202,
"is_parallel": true,
"self": 216.54795951900763,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005351653999980499,
"count": 1,
"is_parallel": true,
"self": 0.0038388239999562757,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015128300000242234,
"count": 10,
"is_parallel": true,
"self": 0.0015128300000242234
}
}
},
"UnityEnvironment.step": {
"total": 0.05467439199998125,
"count": 1,
"is_parallel": true,
"self": 0.000570363999941037,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004392029999849001,
"count": 1,
"is_parallel": true,
"self": 0.0004392029999849001
},
"communicator.exchange": {
"total": 0.051751412000044184,
"count": 1,
"is_parallel": true,
"self": 0.051751412000044184
},
"steps_from_proto": {
"total": 0.0019134130000111327,
"count": 1,
"is_parallel": true,
"self": 0.00040408500001376524,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015093279999973674,
"count": 10,
"is_parallel": true,
"self": 0.0015093279999973674
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 248.59758994099565,
"count": 18201,
"is_parallel": true,
"self": 9.891730777003659,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.452850728995486,
"count": 18201,
"is_parallel": true,
"self": 5.452850728995486
},
"communicator.exchange": {
"total": 200.77159298499333,
"count": 18201,
"is_parallel": true,
"self": 200.77159298499333
},
"steps_from_proto": {
"total": 32.48141545000317,
"count": 18201,
"is_parallel": true,
"self": 6.488548882987402,
"children": {
"_process_rank_one_or_two_observation": {
"total": 25.992866567015767,
"count": 182010,
"is_parallel": true,
"self": 25.992866567015767
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.0001525609999362132,
"count": 1,
"self": 0.0001525609999362132,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 457.6503853660689,
"count": 414184,
"is_parallel": true,
"self": 10.222789125032705,
"children": {
"process_trajectory": {
"total": 251.95813070103617,
"count": 414184,
"is_parallel": true,
"self": 250.84701764803611,
"children": {
"RLTrainer._checkpoint": {
"total": 1.1111130530000537,
"count": 4,
"is_parallel": true,
"self": 1.1111130530000537
}
}
},
"_update_policy": {
"total": 195.46946554000004,
"count": 90,
"is_parallel": true,
"self": 77.07366204099685,
"children": {
"TorchPPOOptimizer.update": {
"total": 118.3958034990032,
"count": 4587,
"is_parallel": true,
"self": 118.3958034990032
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.13384226699997726,
"count": 1,
"self": 0.000879474000043956,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1329627929999333,
"count": 1,
"self": 0.1329627929999333
}
}
}
}
}
}
}