{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.7655732035636902,
"min": 0.7655732035636902,
"max": 2.8725733757019043,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 7318.1142578125,
"min": 7318.1142578125,
"max": 29386.42578125,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.048530578613281,
"min": 0.3474101126194,
"max": 12.048530578613281,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2349.46337890625,
"min": 67.3975601196289,
"max": 2407.87451171875,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.0667594604582893,
"min": 0.06165375752244989,
"max": 0.07622424663086172,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2670378418331572,
"min": 0.24837828063908662,
"max": 0.3774015305942728,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.20024365446000708,
"min": 0.12685778356321595,
"max": 0.2820342378143002,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.8009746178400283,
"min": 0.5074311342528638,
"max": 1.326773105590951,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 24.25,
"min": 3.477272727272727,
"max": 24.25,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1067.0,
"min": 153.0,
"max": 1293.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 24.25,
"min": 3.477272727272727,
"max": 24.25,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1067.0,
"min": 153.0,
"max": 1293.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1679231918",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1679232375"
},
"total": 457.62724795200006,
"count": 1,
"self": 0.48760623099997247,
"children": {
"run_training.setup": {
"total": 0.1889293080000698,
"count": 1,
"self": 0.1889293080000698
},
"TrainerController.start_learning": {
"total": 456.950712413,
"count": 1,
"self": 0.5306185469944467,
"children": {
"TrainerController._reset_env": {
"total": 8.669087413999932,
"count": 1,
"self": 8.669087413999932
},
"TrainerController.advance": {
"total": 447.62166624200574,
"count": 18202,
"self": 0.2609101980167452,
"children": {
"env_step": {
"total": 447.360756043989,
"count": 18202,
"self": 326.26921365599765,
"children": {
"SubprocessEnvManager._take_step": {
"total": 120.83820092599467,
"count": 18202,
"self": 2.6500952969980744,
"children": {
"TorchPolicy.evaluate": {
"total": 118.1881056289966,
"count": 18202,
"self": 118.1881056289966
}
}
},
"workers": {
"total": 0.2533414619966834,
"count": 18202,
"self": 0.0,
"children": {
"worker_root": {
"total": 455.60697510600903,
"count": 18202,
"is_parallel": true,
"self": 216.6428573210061,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.007090177000009135,
"count": 1,
"is_parallel": true,
"self": 0.0048095449998299955,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002280632000179139,
"count": 10,
"is_parallel": true,
"self": 0.002280632000179139
}
}
},
"UnityEnvironment.step": {
"total": 0.03420779400005358,
"count": 1,
"is_parallel": true,
"self": 0.000529696000057811,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00028933999999480875,
"count": 1,
"is_parallel": true,
"self": 0.00028933999999480875
},
"communicator.exchange": {
"total": 0.03145083499998691,
"count": 1,
"is_parallel": true,
"self": 0.03145083499998691
},
"steps_from_proto": {
"total": 0.0019379230000140524,
"count": 1,
"is_parallel": true,
"self": 0.00041536599985647626,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015225570001575761,
"count": 10,
"is_parallel": true,
"self": 0.0015225570001575761
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 238.96411778500294,
"count": 18201,
"is_parallel": true,
"self": 9.343171111000174,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.058271106006146,
"count": 18201,
"is_parallel": true,
"self": 5.058271106006146
},
"communicator.exchange": {
"total": 194.4011254880005,
"count": 18201,
"is_parallel": true,
"self": 194.4011254880005
},
"steps_from_proto": {
"total": 30.16155007999612,
"count": 18201,
"is_parallel": true,
"self": 5.8764773960245975,
"children": {
"_process_rank_one_or_two_observation": {
"total": 24.28507268397152,
"count": 182010,
"is_parallel": true,
"self": 24.28507268397152
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00010242799999105046,
"count": 1,
"self": 0.00010242799999105046,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 444.47266094198187,
"count": 393678,
"is_parallel": true,
"self": 9.26500269500923,
"children": {
"process_trajectory": {
"total": 246.5420167159723,
"count": 393678,
"is_parallel": true,
"self": 245.37045974197235,
"children": {
"RLTrainer._checkpoint": {
"total": 1.171556973999941,
"count": 4,
"is_parallel": true,
"self": 1.171556973999941
}
}
},
"_update_policy": {
"total": 188.66564153100035,
"count": 90,
"is_parallel": true,
"self": 71.06868328999929,
"children": {
"TorchPPOOptimizer.update": {
"total": 117.59695824100106,
"count": 4587,
"is_parallel": true,
"self": 117.59695824100106
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.12923778199990466,
"count": 1,
"self": 0.0008965989999296653,
"children": {
"RLTrainer._checkpoint": {
"total": 0.128341182999975,
"count": 1,
"self": 0.128341182999975
}
}
}
}
}
}
}