{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.8611301183700562,
"min": 0.8611301183700562,
"max": 2.8657422065734863,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 8222.0703125,
"min": 8222.0703125,
"max": 29411.111328125,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.964935302734375,
"min": 0.2805672883987427,
"max": 12.964935302734375,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2528.162353515625,
"min": 54.4300537109375,
"max": 2617.423095703125,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.07358072317462452,
"min": 0.059199218854308705,
"max": 0.07714252447773357,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2943228926984981,
"min": 0.23679687541723482,
"max": 0.3857126223886678,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.19207708479142654,
"min": 0.1256192287649302,
"max": 0.28288584357383206,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.7683083391657062,
"min": 0.5024769150597208,
"max": 1.4144292178691604,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.818181818181817,
"min": 3.6363636363636362,
"max": 25.818181818181817,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1136.0,
"min": 160.0,
"max": 1391.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.818181818181817,
"min": 3.6363636363636362,
"max": 25.818181818181817,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1136.0,
"min": 160.0,
"max": 1391.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1700231706",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.0+cu118",
"numpy_version": "1.23.5",
"end_time_seconds": "1700232195"
},
"total": 489.0650146920001,
"count": 1,
"self": 0.4301758650000238,
"children": {
"run_training.setup": {
"total": 0.04184989600003064,
"count": 1,
"self": 0.04184989600003064
},
"TrainerController.start_learning": {
"total": 488.59298893100004,
"count": 1,
"self": 0.6178613840039588,
"children": {
"TrainerController._reset_env": {
"total": 8.219567095000002,
"count": 1,
"self": 8.219567095000002
},
"TrainerController.advance": {
"total": 479.6680367959963,
"count": 18199,
"self": 0.292514478997532,
"children": {
"env_step": {
"total": 479.37552231699874,
"count": 18199,
"self": 331.6446580650032,
"children": {
"SubprocessEnvManager._take_step": {
"total": 147.43638202900638,
"count": 18199,
"self": 1.5118916080066356,
"children": {
"TorchPolicy.evaluate": {
"total": 145.92449042099975,
"count": 18199,
"self": 145.92449042099975
}
}
},
"workers": {
"total": 0.2944822229891315,
"count": 18199,
"self": 0.0,
"children": {
"worker_root": {
"total": 487.0837858950157,
"count": 18199,
"is_parallel": true,
"self": 234.55342411300717,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0054981229999384595,
"count": 1,
"is_parallel": true,
"self": 0.003989320000073349,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001508802999865111,
"count": 10,
"is_parallel": true,
"self": 0.001508802999865111
}
}
},
"UnityEnvironment.step": {
"total": 0.03484560000003967,
"count": 1,
"is_parallel": true,
"self": 0.0006377680000468899,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0003853929999877437,
"count": 1,
"is_parallel": true,
"self": 0.0003853929999877437
},
"communicator.exchange": {
"total": 0.031897883999931764,
"count": 1,
"is_parallel": true,
"self": 0.031897883999931764
},
"steps_from_proto": {
"total": 0.0019245550000732692,
"count": 1,
"is_parallel": true,
"self": 0.0003896760000543509,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015348790000189183,
"count": 10,
"is_parallel": true,
"self": 0.0015348790000189183
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 252.53036178200853,
"count": 18198,
"is_parallel": true,
"self": 10.890032941028835,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.643882936981754,
"count": 18198,
"is_parallel": true,
"self": 5.643882936981754
},
"communicator.exchange": {
"total": 200.43077331299878,
"count": 18198,
"is_parallel": true,
"self": 200.43077331299878
},
"steps_from_proto": {
"total": 35.56567259099916,
"count": 18198,
"is_parallel": true,
"self": 6.923681450996696,
"children": {
"_process_rank_one_or_two_observation": {
"total": 28.64199114000246,
"count": 181980,
"is_parallel": true,
"self": 28.64199114000246
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00014302199997473508,
"count": 1,
"self": 0.00014302199997473508,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 475.5370926920847,
"count": 520354,
"is_parallel": true,
"self": 10.760590513034686,
"children": {
"process_trajectory": {
"total": 269.7422711870504,
"count": 520354,
"is_parallel": true,
"self": 269.17178609905034,
"children": {
"RLTrainer._checkpoint": {
"total": 0.5704850880000549,
"count": 4,
"is_parallel": true,
"self": 0.5704850880000549
}
}
},
"_update_policy": {
"total": 195.0342309919996,
"count": 90,
"is_parallel": true,
"self": 60.956998150999425,
"children": {
"TorchPPOOptimizer.update": {
"total": 134.07723284100018,
"count": 4587,
"is_parallel": true,
"self": 134.07723284100018
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.0873806339998282,
"count": 1,
"self": 0.001052768999898035,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08632786499993017,
"count": 1,
"self": 0.08632786499993017
}
}
}
}
}
}
}
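
The summary statistics above can be read back with the Python standard library. The following is a minimal sketch, assuming the JSON is saved locally as timers.json (the usual name under run_logs/ in an ML-Agents output directory); the gauge keys are the ones that appear in this file.

import json

# Load the run summary produced by mlagents-learn.
with open("timers.json") as f:
    run = json.load(f)

gauges = run["gauges"]

# Print last/min/max for a few headline statistics from the run.
for key in (
    "SnowballTarget.Environment.CumulativeReward.mean",
    "SnowballTarget.Policy.Entropy.mean",
    "SnowballTarget.Step.mean",
):
    g = gauges[key]
    print(f"{key}: last={g['value']:.3f} min={g['min']:.3f} max={g['max']:.3f}")

# Total wall-clock time of the run, from the root of the timer tree.
print(f"total seconds: {run['total']:.1f}")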