{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.8969525098800659,
"min": 0.8372588157653809,
"max": 2.866602897644043,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 8554.236328125,
"min": 8201.578125,
"max": 29419.9453125,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.737885475158691,
"min": 0.3342110216617584,
"max": 12.737885475158691,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2483.8876953125,
"min": 64.8369369506836,
"max": 2595.067138671875,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06217644533057419,
"min": 0.06217644533057419,
"max": 0.07134428777795869,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.24870578132229676,
"min": 0.24870578132229676,
"max": 0.35672143888979346,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.1752825491860801,
"min": 0.1286728216093216,
"max": 0.2864871957138473,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.7011301967443204,
"min": 0.5146912864372865,
"max": 1.315328472677399,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.477272727272727,
"min": 3.659090909090909,
"max": 25.477272727272727,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1121.0,
"min": 161.0,
"max": 1369.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.477272727272727,
"min": 3.659090909090909,
"max": 25.477272727272727,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1121.0,
"min": 161.0,
"max": 1369.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1678990590",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1678991049"
},
"total": 459.407127834,
"count": 1,
"self": 0.39011033600013434,
"children": {
"run_training.setup": {
"total": 0.10828356299998632,
"count": 1,
"self": 0.10828356299998632
},
"TrainerController.start_learning": {
"total": 458.9087339349999,
"count": 1,
"self": 0.5240285670024605,
"children": {
"TrainerController._reset_env": {
"total": 8.466070172999935,
"count": 1,
"self": 8.466070172999935
},
"TrainerController.advance": {
"total": 449.78747898199754,
"count": 18200,
"self": 0.3021794149908601,
"children": {
"env_step": {
"total": 449.4852995670067,
"count": 18200,
"self": 325.64066380999043,
"children": {
"SubprocessEnvManager._take_step": {
"total": 123.56785069201442,
"count": 18200,
"self": 2.771987748021047,
"children": {
"TorchPolicy.evaluate": {
"total": 120.79586294399337,
"count": 18200,
"self": 120.79586294399337
}
}
},
"workers": {
"total": 0.27678506500183175,
"count": 18200,
"self": 0.0,
"children": {
"worker_root": {
"total": 457.4422964019939,
"count": 18200,
"is_parallel": true,
"self": 220.30583914398676,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005864240999926551,
"count": 1,
"is_parallel": true,
"self": 0.00449690600021313,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001367334999713421,
"count": 10,
"is_parallel": true,
"self": 0.001367334999713421
}
}
},
"UnityEnvironment.step": {
"total": 0.06738185200015323,
"count": 1,
"is_parallel": true,
"self": 0.0005966659998648538,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00040737700010140543,
"count": 1,
"is_parallel": true,
"self": 0.00040737700010140543
},
"communicator.exchange": {
"total": 0.06457173000012517,
"count": 1,
"is_parallel": true,
"self": 0.06457173000012517
},
"steps_from_proto": {
"total": 0.0018060790000617999,
"count": 1,
"is_parallel": true,
"self": 0.00048670300020603463,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013193759998557653,
"count": 10,
"is_parallel": true,
"self": 0.0013193759998557653
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 237.1364572580071,
"count": 18199,
"is_parallel": true,
"self": 9.446961516994179,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.050350005005157,
"count": 18199,
"is_parallel": true,
"self": 5.050350005005157
},
"communicator.exchange": {
"total": 192.22715588900996,
"count": 18199,
"is_parallel": true,
"self": 192.22715588900996
},
"steps_from_proto": {
"total": 30.411989846997812,
"count": 18199,
"is_parallel": true,
"self": 6.003587862963741,
"children": {
"_process_rank_one_or_two_observation": {
"total": 24.40840198403407,
"count": 181990,
"is_parallel": true,
"self": 24.40840198403407
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00010636799993335444,
"count": 1,
"self": 0.00010636799993335444,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 446.5745033669982,
"count": 400011,
"is_parallel": true,
"self": 9.802865201001396,
"children": {
"process_trajectory": {
"total": 247.64400811699397,
"count": 400011,
"is_parallel": true,
"self": 246.92445442799385,
"children": {
"RLTrainer._checkpoint": {
"total": 0.7195536890001222,
"count": 4,
"is_parallel": true,
"self": 0.7195536890001222
}
}
},
"_update_policy": {
"total": 189.12763004900285,
"count": 90,
"is_parallel": true,
"self": 72.15922097400153,
"children": {
"TorchPPOOptimizer.update": {
"total": 116.96840907500132,
"count": 4587,
"is_parallel": true,
"self": 116.96840907500132
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.13104984500000683,
"count": 1,
"self": 0.0009097290001136571,
"children": {
"RLTrainer._checkpoint": {
"total": 0.13014011599989317,
"count": 1,
"self": 0.13014011599989317
}
}
}
}
}
}
}