{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.789790689945221,
"min": 0.789790689945221,
"max": 2.8516151905059814,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 7532.23388671875,
"min": 7532.23388671875,
"max": 29234.7578125,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.553600311279297,
"min": 0.31576886773109436,
"max": 12.565973281860352,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2447.9521484375,
"min": 61.259159088134766,
"max": 2559.8203125,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06633490494734325,
"min": 0.05883006659760391,
"max": 0.07328271560978536,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.265339619789373,
"min": 0.23532026639041564,
"max": 0.3541172459606291,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.2226457012196382,
"min": 0.10835672097677804,
"max": 0.29302526477213,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.8905828048785528,
"min": 0.43342688390711215,
"max": 1.4457648639001097,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 24.431818181818183,
"min": 3.2045454545454546,
"max": 24.954545454545453,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1075.0,
"min": 141.0,
"max": 1354.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 24.431818181818183,
"min": 3.2045454545454546,
"max": 24.954545454545453,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1075.0,
"min": 141.0,
"max": 1354.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1678708943",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1678709447"
},
"total": 503.5343705660001,
"count": 1,
"self": 0.4466016110002329,
"children": {
"run_training.setup": {
"total": 0.10529778699992676,
"count": 1,
"self": 0.10529778699992676
},
"TrainerController.start_learning": {
"total": 502.98247116799996,
"count": 1,
"self": 0.6605594869967035,
"children": {
"TrainerController._reset_env": {
"total": 9.693524621999927,
"count": 1,
"self": 9.693524621999927
},
"TrainerController.advance": {
"total": 492.4855847980033,
"count": 18201,
"self": 0.3377977729927579,
"children": {
"env_step": {
"total": 492.14778702501053,
"count": 18201,
"self": 355.63209381803574,
"children": {
"SubprocessEnvManager._take_step": {
"total": 136.0126664359792,
"count": 18201,
"self": 2.2921918159628376,
"children": {
"TorchPolicy.evaluate": {
"total": 133.72047462001638,
"count": 18201,
"self": 133.72047462001638
}
}
},
"workers": {
"total": 0.5030267709955751,
"count": 18201,
"self": 0.0,
"children": {
"worker_root": {
"total": 501.31981936898444,
"count": 18201,
"is_parallel": true,
"self": 237.22169087699592,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.006739766000009695,
"count": 1,
"is_parallel": true,
"self": 0.0047242389997563805,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0020155270002533143,
"count": 10,
"is_parallel": true,
"self": 0.0020155270002533143
}
}
},
"UnityEnvironment.step": {
"total": 0.06989678499996899,
"count": 1,
"is_parallel": true,
"self": 0.0006936530000984931,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00036460799992710236,
"count": 1,
"is_parallel": true,
"self": 0.00036460799992710236
},
"communicator.exchange": {
"total": 0.06640127899993331,
"count": 1,
"is_parallel": true,
"self": 0.06640127899993331
},
"steps_from_proto": {
"total": 0.00243724500001008,
"count": 1,
"is_parallel": true,
"self": 0.00041529100019488396,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002021953999815196,
"count": 10,
"is_parallel": true,
"self": 0.002021953999815196
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 264.0981284919885,
"count": 18200,
"is_parallel": true,
"self": 10.351048845991158,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.706464272997891,
"count": 18200,
"is_parallel": true,
"self": 5.706464272997891
},
"communicator.exchange": {
"total": 213.10450206699602,
"count": 18200,
"is_parallel": true,
"self": 213.10450206699602
},
"steps_from_proto": {
"total": 34.93611330600345,
"count": 18200,
"is_parallel": true,
"self": 7.144953224954634,
"children": {
"_process_rank_one_or_two_observation": {
"total": 27.791160081048815,
"count": 182000,
"is_parallel": true,
"self": 27.791160081048815
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00023254300003827666,
"count": 1,
"self": 0.00023254300003827666,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 488.6465625500176,
"count": 441939,
"is_parallel": true,
"self": 11.159618020983999,
"children": {
"process_trajectory": {
"total": 272.9937724030351,
"count": 441939,
"is_parallel": true,
"self": 272.0877884310353,
"children": {
"RLTrainer._checkpoint": {
"total": 0.9059839719998308,
"count": 4,
"is_parallel": true,
"self": 0.9059839719998308
}
}
},
"_update_policy": {
"total": 204.4931721259985,
"count": 90,
"is_parallel": true,
"self": 73.27936789900252,
"children": {
"TorchPPOOptimizer.update": {
"total": 131.213804226996,
"count": 4587,
"is_parallel": true,
"self": 131.213804226996
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.1425697180000043,
"count": 1,
"self": 0.001183841999818469,
"children": {
"RLTrainer._checkpoint": {
"total": 0.14138587600018582,
"count": 1,
"self": 0.14138587600018582
}
}
}
}
}
}
}
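
The JSON above is an ML-Agents training log: the "gauges" block summarizes the run (policy entropy, losses, the learning-rate/epsilon/beta schedules, and cumulative reward over 20 summary points), "metadata" records the command line and library versions, and the nested timer tree under "children" breaks down where the roughly 503 seconds of wall-clock time went. Below is a minimal sketch of how such a file could be inspected, assuming it has been saved locally as run_logs/timers.json (that path is an assumption for illustration, not stated above); only the standard-library json module is used, and the key names mirror the document.

import json

# Load a local copy of the training log shown above (assumed path).
with open("run_logs/timers.json") as f:
    timers = json.load(f)

# Each gauge keeps the latest value plus min/max/count over the summary points.
reward = timers["gauges"]["SnowballTarget.Environment.CumulativeReward.mean"]
print(f"final mean reward: {reward['value']:.2f} "
      f"(min {reward['min']:.2f}, max {reward['max']:.2f}, n={reward['count']})")

# The timer tree nests by phase; env_step sits under start_learning -> advance.
advance = timers["children"]["TrainerController.start_learning"]["children"][
    "TrainerController.advance"]
env_step_s = advance["children"]["env_step"]["total"]
print(f"env_step: {env_step_s:.1f}s of {timers['total']:.1f}s total")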