{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.9211111068725586,
"min": 0.9211111068725586,
"max": 2.870500326156616,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 8754.240234375,
"min": 8754.240234375,
"max": 29302.068359375,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.721619606018066,
"min": 0.2853982150554657,
"max": 12.721619606018066,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2480.7158203125,
"min": 55.367252349853516,
"max": 2581.176025390625,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06869979535957671,
"min": 0.06230679360919121,
"max": 0.07515160377107688,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.27479918143830684,
"min": 0.24922717443676484,
"max": 0.3594608847455603,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.22315458168147825,
"min": 0.11283515821046688,
"max": 0.29144429473900324,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.892618326725913,
"min": 0.45134063284186754,
"max": 1.449108677459698,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.295454545454547,
"min": 2.9318181818181817,
"max": 25.295454545454547,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1113.0,
"min": 129.0,
"max": 1387.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.295454545454547,
"min": 2.9318181818181817,
"max": 25.295454545454547,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1113.0,
"min": 129.0,
"max": 1387.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1731165532",
"python_version": "3.10.12 (main, Sep 11 2024, 15:47:36) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./ml-agents/config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics --force",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.5.0+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1731165954"
},
"total": 422.17339538300007,
"count": 1,
"self": 0.7798687949999703,
"children": {
"run_training.setup": {
"total": 0.05320806400004585,
"count": 1,
"self": 0.05320806400004585
},
"TrainerController.start_learning": {
"total": 421.34031852400005,
"count": 1,
"self": 0.33914489700430295,
"children": {
"TrainerController._reset_env": {
"total": 1.9069784420000815,
"count": 1,
"self": 1.9069784420000815
},
"TrainerController.advance": {
"total": 418.9570084169957,
"count": 18192,
"self": 0.36792027100034375,
"children": {
"env_step": {
"total": 297.33071537700005,
"count": 18192,
"self": 226.26653139100745,
"children": {
"SubprocessEnvManager._take_step": {
"total": 70.84081459400034,
"count": 18192,
"self": 1.2599665890102187,
"children": {
"TorchPolicy.evaluate": {
"total": 69.58084800499012,
"count": 18192,
"self": 69.58084800499012
}
}
},
"workers": {
"total": 0.22336939199226435,
"count": 18192,
"self": 0.0,
"children": {
"worker_root": {
"total": 419.5472597150051,
"count": 18192,
"is_parallel": true,
"self": 221.3738541839998,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0031065680000210705,
"count": 1,
"is_parallel": true,
"self": 0.0008088060002364728,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0022977619997845977,
"count": 10,
"is_parallel": true,
"self": 0.0022977619997845977
}
}
},
"UnityEnvironment.step": {
"total": 0.04131249800002479,
"count": 1,
"is_parallel": true,
"self": 0.0006982300001254771,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0003707429999622036,
"count": 1,
"is_parallel": true,
"self": 0.0003707429999622036
},
"communicator.exchange": {
"total": 0.0379393869999376,
"count": 1,
"is_parallel": true,
"self": 0.0379393869999376
},
"steps_from_proto": {
"total": 0.0023041379999995115,
"count": 1,
"is_parallel": true,
"self": 0.00037924499986274895,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0019248930001367626,
"count": 10,
"is_parallel": true,
"self": 0.0019248930001367626
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 198.1734055310053,
"count": 18191,
"is_parallel": true,
"self": 9.808452223003542,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.385889177993818,
"count": 18191,
"is_parallel": true,
"self": 5.385889177993818
},
"communicator.exchange": {
"total": 151.04772930700335,
"count": 18191,
"is_parallel": true,
"self": 151.04772930700335
},
"steps_from_proto": {
"total": 31.93133482300459,
"count": 18191,
"is_parallel": true,
"self": 5.735876700998006,
"children": {
"_process_rank_one_or_two_observation": {
"total": 26.195458122006585,
"count": 181910,
"is_parallel": true,
"self": 26.195458122006585
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 121.25837276899529,
"count": 18192,
"self": 0.38729556900239004,
"children": {
"process_trajectory": {
"total": 28.419815529993457,
"count": 18192,
"self": 27.91601563399331,
"children": {
"RLTrainer._checkpoint": {
"total": 0.503799896000146,
"count": 4,
"self": 0.503799896000146
}
}
},
"_update_policy": {
"total": 92.45126166999944,
"count": 90,
"self": 37.54639422599985,
"children": {
"TorchPPOOptimizer.update": {
"total": 54.90486744399959,
"count": 4587,
"self": 54.90486744399959
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.2660000265896088e-06,
"count": 1,
"self": 1.2660000265896088e-06
},
"TrainerController._save_models": {
"total": 0.1371855019999657,
"count": 1,
"self": 0.0012760340000568249,
"children": {
"RLTrainer._checkpoint": {
"total": 0.13590946799990888,
"count": 1,
"self": 0.13590946799990888
}
}
}
}
}
}
}