{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 1.0040364265441895,
"min": 1.0040364265441895,
"max": 2.873852252960205,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 9608.62890625,
"min": 9608.62890625,
"max": 29525.958984375,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.970771789550781,
"min": 0.41777828335762024,
"max": 12.970771789550781,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2529.300537109375,
"min": 81.04898834228516,
"max": 2623.816162109375,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.07090346473206392,
"min": 0.06335814067929545,
"max": 0.0772896319446171,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2836138589282557,
"min": 0.2617088884478161,
"max": 0.36295459813646075,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.20142017549597746,
"min": 0.11772417878462732,
"max": 0.27004651557288917,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.8056807019839098,
"min": 0.4708967151385093,
"max": 1.330227179854524,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.363636363636363,
"min": 3.522727272727273,
"max": 25.4,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1116.0,
"min": 155.0,
"max": 1397.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.363636363636363,
"min": 3.522727272727273,
"max": 25.4,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1116.0,
"min": 155.0,
"max": 1397.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1686224318",
"python_version": "3.10.11 (main, Apr 5 2023, 14:15:10) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1686224794"
},
"total": 475.752323716,
"count": 1,
"self": 0.3883808379999891,
"children": {
"run_training.setup": {
"total": 0.05853340399994522,
"count": 1,
"self": 0.05853340399994522
},
"TrainerController.start_learning": {
"total": 475.30540947400004,
"count": 1,
"self": 0.5279360380005755,
"children": {
"TrainerController._reset_env": {
"total": 4.948084498999947,
"count": 1,
"self": 4.948084498999947
},
"TrainerController.advance": {
"total": 469.6817250499996,
"count": 18205,
"self": 0.258410945011633,
"children": {
"env_step": {
"total": 469.423314104988,
"count": 18205,
"self": 339.96352761299227,
"children": {
"SubprocessEnvManager._take_step": {
"total": 129.19253908799158,
"count": 18205,
"self": 1.7888844660100176,
"children": {
"TorchPolicy.evaluate": {
"total": 127.40365462198156,
"count": 18205,
"self": 127.40365462198156
}
}
},
"workers": {
"total": 0.26724740400413793,
"count": 18205,
"self": 0.0,
"children": {
"worker_root": {
"total": 473.6543426510036,
"count": 18205,
"is_parallel": true,
"self": 225.06619634500942,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.007000381000011657,
"count": 1,
"is_parallel": true,
"self": 0.00479913900005613,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0022012419999555277,
"count": 10,
"is_parallel": true,
"self": 0.0022012419999555277
}
}
},
"UnityEnvironment.step": {
"total": 0.03559941899993646,
"count": 1,
"is_parallel": true,
"self": 0.0005572110000002795,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.000367146000030516,
"count": 1,
"is_parallel": true,
"self": 0.000367146000030516
},
"communicator.exchange": {
"total": 0.032604382999920745,
"count": 1,
"is_parallel": true,
"self": 0.032604382999920745
},
"steps_from_proto": {
"total": 0.0020706789999849207,
"count": 1,
"is_parallel": true,
"self": 0.000381902000071932,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016887769999129887,
"count": 10,
"is_parallel": true,
"self": 0.0016887769999129887
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 248.5881463059942,
"count": 18204,
"is_parallel": true,
"self": 9.902435393983751,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.556366875003391,
"count": 18204,
"is_parallel": true,
"self": 5.556366875003391
},
"communicator.exchange": {
"total": 197.82954818399935,
"count": 18204,
"is_parallel": true,
"self": 197.82954818399935
},
"steps_from_proto": {
"total": 35.29979585300771,
"count": 18204,
"is_parallel": true,
"self": 6.747565265978665,
"children": {
"_process_rank_one_or_two_observation": {
"total": 28.552230587029044,
"count": 182040,
"is_parallel": true,
"self": 28.552230587029044
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00013384299995777837,
"count": 1,
"self": 0.00013384299995777837,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 465.94934846901333,
"count": 456224,
"is_parallel": true,
"self": 10.11275479399319,
"children": {
"process_trajectory": {
"total": 255.17225393402032,
"count": 456224,
"is_parallel": true,
"self": 254.35860160202037,
"children": {
"RLTrainer._checkpoint": {
"total": 0.8136523319999469,
"count": 4,
"is_parallel": true,
"self": 0.8136523319999469
}
}
},
"_update_policy": {
"total": 200.66433974099982,
"count": 90,
"is_parallel": true,
"self": 77.77026033400296,
"children": {
"TorchPPOOptimizer.update": {
"total": 122.89407940699687,
"count": 4587,
"is_parallel": true,
"self": 122.89407940699687
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.14753004399995007,
"count": 1,
"self": 0.0009672139999565843,
"children": {
"RLTrainer._checkpoint": {
"total": 0.14656282999999348,
"count": 1,
"self": 0.14656282999999348
}
}
}
}
}
}
}