{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 1.6395481824874878,
"min": 1.6395481824874878,
"max": 2.8564162254333496,
"count": 10
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 16736.5078125,
"min": 16736.5078125,
"max": 29441.08203125,
"count": 10
},
"SnowballTarget.Step.mean": {
"value": 99960.0,
"min": 9952.0,
"max": 99960.0,
"count": 10
},
"SnowballTarget.Step.sum": {
"value": 99960.0,
"min": 9952.0,
"max": 99960.0,
"count": 10
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 8.185148239135742,
"min": 0.4165266454219818,
"max": 8.185148239135742,
"count": 10
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 1669.770263671875,
"min": 80.80616760253906,
"max": 1669.770263671875,
"count": 10
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 10
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 10
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06609853671594892,
"min": 0.06154232791998178,
"max": 0.07377701656214947,
"count": 10
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.33049268357974465,
"min": 0.2579160844564766,
"max": 0.35723233786320274,
"count": 10
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.2254839167875402,
"min": 0.13958177042613718,
"max": 0.2657166821115157,
"count": 10
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 1.127419583937701,
"min": 0.5583270817045487,
"max": 1.3285834105575787,
"count": 10
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 1.6464094511999996e-05,
"min": 1.6464094511999996e-05,
"max": 0.000283764005412,
"count": 10
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 8.232047255999999e-05,
"min": 8.232047255999999e-05,
"max": 0.00127032007656,
"count": 10
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.105488,
"min": 0.105488,
"max": 0.194588,
"count": 10
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.52744,
"min": 0.4615520000000001,
"max": 0.92344,
"count": 10
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0002838512,
"min": 0.0002838512,
"max": 0.0047299412,
"count": 10
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.001419256,
"min": 0.001419256,
"max": 0.021179656,
"count": 10
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 17.763636363636362,
"min": 3.659090909090909,
"max": 17.763636363636362,
"count": 10
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 977.0,
"min": 161.0,
"max": 977.0,
"count": 10
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 17.763636363636362,
"min": 3.659090909090909,
"max": 17.763636363636362,
"count": 10
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 977.0,
"min": 161.0,
"max": 977.0,
"count": 10
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 10
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 10
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1678974684",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1678974915"
},
"total": 231.72714404600003,
"count": 1,
"self": 0.43015521300003456,
"children": {
"run_training.setup": {
"total": 0.10267839799996636,
"count": 1,
"self": 0.10267839799996636
},
"TrainerController.start_learning": {
"total": 231.19431043500003,
"count": 1,
"self": 0.28420954900218476,
"children": {
"TrainerController._reset_env": {
"total": 8.725130409999906,
"count": 1,
"self": 8.725130409999906
},
"TrainerController.advance": {
"total": 222.05590402599785,
"count": 9137,
"self": 0.14012065399799667,
"children": {
"env_step": {
"total": 221.91578337199985,
"count": 9137,
"self": 161.63861024099265,
"children": {
"SubprocessEnvManager._take_step": {
"total": 60.13953969100032,
"count": 9137,
"self": 0.9036570289957808,
"children": {
"TorchPolicy.evaluate": {
"total": 59.23588266200454,
"count": 9137,
"self": 59.23588266200454
}
}
},
"workers": {
"total": 0.13763344000687994,
"count": 9137,
"self": 0.0,
"children": {
"worker_root": {
"total": 230.49218445699955,
"count": 9137,
"is_parallel": true,
"self": 112.1439208249983,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.006184263000022838,
"count": 1,
"is_parallel": true,
"self": 0.004136266999921645,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002047996000101193,
"count": 10,
"is_parallel": true,
"self": 0.002047996000101193
}
}
},
"UnityEnvironment.step": {
"total": 0.03730256799997278,
"count": 1,
"is_parallel": true,
"self": 0.0003904319999037398,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0003292760000022099,
"count": 1,
"is_parallel": true,
"self": 0.0003292760000022099
},
"communicator.exchange": {
"total": 0.03493792200004009,
"count": 1,
"is_parallel": true,
"self": 0.03493792200004009
},
"steps_from_proto": {
"total": 0.001644938000026741,
"count": 1,
"is_parallel": true,
"self": 0.0003359729997782779,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013089650002484632,
"count": 10,
"is_parallel": true,
"self": 0.0013089650002484632
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 118.34826363200125,
"count": 9136,
"is_parallel": true,
"self": 4.6598411970014695,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 2.5773885869973583,
"count": 9136,
"is_parallel": true,
"self": 2.5773885869973583
},
"communicator.exchange": {
"total": 96.12857439599622,
"count": 9136,
"is_parallel": true,
"self": 96.12857439599622
},
"steps_from_proto": {
"total": 14.982459452006196,
"count": 9136,
"is_parallel": true,
"self": 2.9818330040078536,
"children": {
"_process_rank_one_or_two_observation": {
"total": 12.000626447998343,
"count": 91360,
"is_parallel": true,
"self": 12.000626447998343
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.000274155000056453,
"count": 1,
"self": 0.000274155000056453,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 220.58801289596852,
"count": 191925,
"is_parallel": true,
"self": 4.598340775951215,
"children": {
"process_trajectory": {
"total": 121.84949192101749,
"count": 191925,
"is_parallel": true,
"self": 121.04439630601746,
"children": {
"RLTrainer._checkpoint": {
"total": 0.8050956150000275,
"count": 2,
"is_parallel": true,
"self": 0.8050956150000275
}
}
},
"_update_policy": {
"total": 94.14018019899981,
"count": 45,
"is_parallel": true,
"self": 35.38208663899843,
"children": {
"TorchPPOOptimizer.update": {
"total": 58.758093560001385,
"count": 2289,
"is_parallel": true,
"self": 58.758093560001385
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.1287922950000393,
"count": 1,
"self": 0.0009033990000943959,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1278888959999449,
"count": 1,
"self": 0.1278888959999449
}
}
}
}
}
}
}