{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 1.0942000150680542,
"min": 1.0942000150680542,
"max": 2.8682949542999268,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 10447.421875,
"min": 10447.421875,
"max": 29374.208984375,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.525918960571289,
"min": 0.5909805297851562,
"max": 12.525918960571289,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2442.55419921875,
"min": 114.65022277832031,
"max": 2545.302734375,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06227211903680262,
"min": 0.06227211903680262,
"max": 0.0801908915514907,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.24908847614721047,
"min": 0.24908847614721047,
"max": 0.36801745339671116,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.1931430067428771,
"min": 0.12608044048515604,
"max": 0.27375477774470464,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.7725720269715084,
"min": 0.5043217619406242,
"max": 1.3687738887235232,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 24.40909090909091,
"min": 3.75,
"max": 24.90909090909091,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1074.0,
"min": 165.0,
"max": 1370.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 24.40909090909091,
"min": 3.75,
"max": 24.90909090909091,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1074.0,
"min": 165.0,
"max": 1370.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1673456868",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1673457335"
},
"total": 467.61857581899994,
"count": 1,
"self": 0.4349125329998742,
"children": {
"run_training.setup": {
"total": 0.11217131400007929,
"count": 1,
"self": 0.11217131400007929
},
"TrainerController.start_learning": {
"total": 467.071491972,
"count": 1,
"self": 0.596926643009283,
"children": {
"TrainerController._reset_env": {
"total": 7.475590416999921,
"count": 1,
"self": 7.475590416999921
},
"TrainerController.advance": {
"total": 458.8694602429906,
"count": 18201,
"self": 0.32691252498466383,
"children": {
"env_step": {
"total": 458.54254771800595,
"count": 18201,
"self": 303.20210405601085,
"children": {
"SubprocessEnvManager._take_step": {
"total": 155.02560296799334,
"count": 18201,
"self": 1.5236862590054443,
"children": {
"TorchPolicy.evaluate": {
"total": 153.5019167089879,
"count": 18201,
"self": 34.84279805298729,
"children": {
"TorchPolicy.sample_actions": {
"total": 118.6591186560006,
"count": 18201,
"self": 118.6591186560006
}
}
}
}
},
"workers": {
"total": 0.3148406940017594,
"count": 18201,
"self": 0.0,
"children": {
"worker_root": {
"total": 465.6876764470021,
"count": 18201,
"is_parallel": true,
"self": 221.0277791320035,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.006565117999912218,
"count": 1,
"is_parallel": true,
"self": 0.0038448719999450987,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002720245999967119,
"count": 10,
"is_parallel": true,
"self": 0.002720245999967119
}
}
},
"UnityEnvironment.step": {
"total": 0.03262800699997115,
"count": 1,
"is_parallel": true,
"self": 0.0005421719999958441,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00035759499996856903,
"count": 1,
"is_parallel": true,
"self": 0.00035759499996856903
},
"communicator.exchange": {
"total": 0.029715485999986413,
"count": 1,
"is_parallel": true,
"self": 0.029715485999986413
},
"steps_from_proto": {
"total": 0.0020127540000203226,
"count": 1,
"is_parallel": true,
"self": 0.0004986720000488276,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001514081999971495,
"count": 10,
"is_parallel": true,
"self": 0.001514081999971495
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 244.6598973149986,
"count": 18200,
"is_parallel": true,
"self": 9.315655299995683,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.313164259007863,
"count": 18200,
"is_parallel": true,
"self": 5.313164259007863
},
"communicator.exchange": {
"total": 197.1926399030025,
"count": 18200,
"is_parallel": true,
"self": 197.1926399030025
},
"steps_from_proto": {
"total": 32.83843785299257,
"count": 18200,
"is_parallel": true,
"self": 7.22408198602534,
"children": {
"_process_rank_one_or_two_observation": {
"total": 25.61435586696723,
"count": 182000,
"is_parallel": true,
"self": 25.61435586696723
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 4.7705000042697066e-05,
"count": 1,
"self": 4.7705000042697066e-05,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 455.65411804802466,
"count": 377128,
"is_parallel": true,
"self": 10.105671874045925,
"children": {
"process_trajectory": {
"total": 259.9929893109779,
"count": 377128,
"is_parallel": true,
"self": 259.19807375297796,
"children": {
"RLTrainer._checkpoint": {
"total": 0.794915557999957,
"count": 4,
"is_parallel": true,
"self": 0.794915557999957
}
}
},
"_update_policy": {
"total": 185.5554568630008,
"count": 90,
"is_parallel": true,
"self": 48.50405841100337,
"children": {
"TorchPPOOptimizer.update": {
"total": 137.05139845199744,
"count": 4587,
"is_parallel": true,
"self": 137.05139845199744
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.12946696400013025,
"count": 1,
"self": 0.0008764300002894743,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12859053399984077,
"count": 1,
"self": 0.12859053399984077
}
}
}
}
}
}
}