{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.8285091519355774,
"min": 0.8165498375892639,
"max": 2.8554112911224365,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 7910.60546875,
"min": 7910.60546875,
"max": 29273.677734375,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.770590782165527,
"min": 0.4351233243942261,
"max": 12.770590782165527,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2490.26513671875,
"min": 84.41392517089844,
"max": 2613.099365234375,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06771687565196771,
"min": 0.061401630520669964,
"max": 0.07312868266698762,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.27086750260787085,
"min": 0.24560652208267986,
"max": 0.3582391675679223,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.1979734380455578,
"min": 0.09601693931515055,
"max": 0.2913964162854587,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.7918937521822312,
"min": 0.3840677572606022,
"max": 1.4569820814272936,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.001385256038248,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.961752,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.0230914248,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 24.636363636363637,
"min": 3.0681818181818183,
"max": 25.272727272727273,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1084.0,
"min": 135.0,
"max": 1390.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 24.636363636363637,
"min": 3.0681818181818183,
"max": 25.272727272727273,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1084.0,
"min": 135.0,
"max": 1390.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1707373011",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.0+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1707373493"
},
"total": 482.1834616809999,
"count": 1,
"self": 0.4388804029999278,
"children": {
"run_training.setup": {
"total": 0.05030994099996633,
"count": 1,
"self": 0.05030994099996633
},
"TrainerController.start_learning": {
"total": 481.69427133700003,
"count": 1,
"self": 0.6157540050070338,
"children": {
"TrainerController._reset_env": {
"total": 3.9616631289999873,
"count": 1,
"self": 3.9616631289999873
},
"TrainerController.advance": {
"total": 477.0247452669931,
"count": 18200,
"self": 0.2997630049891882,
"children": {
"env_step": {
"total": 476.7249822620039,
"count": 18200,
"self": 309.004646249004,
"children": {
"SubprocessEnvManager._take_step": {
"total": 167.39694554000533,
"count": 18200,
"self": 1.5787248890098908,
"children": {
"TorchPolicy.evaluate": {
"total": 165.81822065099544,
"count": 18200,
"self": 165.81822065099544
}
}
},
"workers": {
"total": 0.3233904729945607,
"count": 18200,
"self": 0.0,
"children": {
"worker_root": {
"total": 480.3478193909998,
"count": 18200,
"is_parallel": true,
"self": 237.7628350530049,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005705104000014671,
"count": 1,
"is_parallel": true,
"self": 0.004106535000005351,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00159856900000932,
"count": 10,
"is_parallel": true,
"self": 0.00159856900000932
}
}
},
"UnityEnvironment.step": {
"total": 0.043505696000011085,
"count": 1,
"is_parallel": true,
"self": 0.0008049519999531185,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005127450000372846,
"count": 1,
"is_parallel": true,
"self": 0.0005127450000372846
},
"communicator.exchange": {
"total": 0.039704819000007774,
"count": 1,
"is_parallel": true,
"self": 0.039704819000007774
},
"steps_from_proto": {
"total": 0.002483180000012908,
"count": 1,
"is_parallel": true,
"self": 0.000481443999944986,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002001736000067922,
"count": 10,
"is_parallel": true,
"self": 0.002001736000067922
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 242.5849843379949,
"count": 18199,
"is_parallel": true,
"self": 11.601160779989414,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.960717912996813,
"count": 18199,
"is_parallel": true,
"self": 5.960717912996813
},
"communicator.exchange": {
"total": 187.2752166270041,
"count": 18199,
"is_parallel": true,
"self": 187.2752166270041
},
"steps_from_proto": {
"total": 37.74788901800457,
"count": 18199,
"is_parallel": true,
"self": 6.987348224989944,
"children": {
"_process_rank_one_or_two_observation": {
"total": 30.760540793014627,
"count": 181990,
"is_parallel": true,
"self": 30.760540793014627
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00013341299995772715,
"count": 1,
"self": 0.00013341299995772715,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 470.76064889798977,
"count": 719324,
"is_parallel": true,
"self": 16.189526099057332,
"children": {
"process_trajectory": {
"total": 261.1784381069323,
"count": 719324,
"is_parallel": true,
"self": 260.64203366293225,
"children": {
"RLTrainer._checkpoint": {
"total": 0.5364044440000271,
"count": 4,
"is_parallel": true,
"self": 0.5364044440000271
}
}
},
"_update_policy": {
"total": 193.39268469200016,
"count": 90,
"is_parallel": true,
"self": 54.17852410900201,
"children": {
"TorchPPOOptimizer.update": {
"total": 139.21416058299815,
"count": 4584,
"is_parallel": true,
"self": 139.21416058299815
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.09197552299997369,
"count": 1,
"self": 0.0009376809999821489,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09103784199999154,
"count": 1,
"self": 0.09103784199999154
}
}
}
}
}
}
}