{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.948525071144104,
"min": 0.948525071144104,
"max": 2.8652477264404297,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 9066.951171875,
"min": 9066.951171875,
"max": 29406.037109375,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.973207473754883,
"min": 0.3863008916378021,
"max": 12.973207473754883,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2529.775390625,
"min": 74.94237518310547,
"max": 2620.40771484375,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.07588126734538259,
"min": 0.06383201489955478,
"max": 0.07588126734538259,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.30352506938153034,
"min": 0.25532805959821914,
"max": 0.356467714498114,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.19605158868373612,
"min": 0.10328806343479266,
"max": 0.27717882862278065,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.7842063547349445,
"min": 0.41315225373917064,
"max": 1.3858941431139031,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.65909090909091,
"min": 2.977272727272727,
"max": 25.65909090909091,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1129.0,
"min": 131.0,
"max": 1394.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.65909090909091,
"min": 2.977272727272727,
"max": 25.65909090909091,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1129.0,
"min": 131.0,
"max": 1394.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1680284212",
"python_version": "3.9.16 (main, Mar 8 2023, 14:00:05) \n[GCC 11.2.0]",
"command_line_arguments": "/home/u/miniconda3/envs/huggingface-unity/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics --force",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0",
"numpy_version": "1.21.2",
"end_time_seconds": "1680284533"
},
"total": 320.75122395000017,
"count": 1,
"self": 0.21768646900000022,
"children": {
"run_training.setup": {
"total": 0.0071148589995573275,
"count": 1,
"self": 0.0071148589995573275
},
"TrainerController.start_learning": {
"total": 320.5264226220006,
"count": 1,
"self": 0.25077017797866574,
"children": {
"TrainerController._reset_env": {
"total": 2.2422614650004107,
"count": 1,
"self": 2.2422614650004107
},
"TrainerController.advance": {
"total": 317.94355877902217,
"count": 18201,
"self": 0.1255732189911214,
"children": {
"env_step": {
"total": 317.81798556003105,
"count": 18201,
"self": 183.7982844709777,
"children": {
"SubprocessEnvManager._take_step": {
"total": 133.89295002300332,
"count": 18201,
"self": 0.8289285209566515,
"children": {
"TorchPolicy.evaluate": {
"total": 133.06402150204667,
"count": 18201,
"self": 133.06402150204667
}
}
},
"workers": {
"total": 0.1267510660500193,
"count": 18201,
"self": 0.0,
"children": {
"worker_root": {
"total": 319.8889944699995,
"count": 18201,
"is_parallel": true,
"self": 185.58709164506035,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0009026980005728547,
"count": 1,
"is_parallel": true,
"self": 0.0002647160008564242,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0006379819997164304,
"count": 10,
"is_parallel": true,
"self": 0.0006379819997164304
}
}
},
"UnityEnvironment.step": {
"total": 0.01593817899993155,
"count": 1,
"is_parallel": true,
"self": 0.00018892800017056288,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00015162799991230713,
"count": 1,
"is_parallel": true,
"self": 0.00015162799991230713
},
"communicator.exchange": {
"total": 0.015005291000306897,
"count": 1,
"is_parallel": true,
"self": 0.015005291000306897
},
"steps_from_proto": {
"total": 0.0005923319995417842,
"count": 1,
"is_parallel": true,
"self": 0.00013659899923368357,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0004557330003081006,
"count": 10,
"is_parallel": true,
"self": 0.0004557330003081006
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 134.30190282493913,
"count": 18200,
"is_parallel": true,
"self": 3.3185713517013937,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 1.8033410330672268,
"count": 18200,
"is_parallel": true,
"self": 1.8033410330672268
},
"communicator.exchange": {
"total": 119.24831985009587,
"count": 18200,
"is_parallel": true,
"self": 119.24831985009587
},
"steps_from_proto": {
"total": 9.931670590074646,
"count": 18200,
"is_parallel": true,
"self": 2.0430779288954,
"children": {
"_process_rank_one_or_two_observation": {
"total": 7.888592661179246,
"count": 182000,
"is_parallel": true,
"self": 7.888592661179246
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 4.312999953981489e-05,
"count": 1,
"self": 4.312999953981489e-05,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 316.38345145829953,
"count": 335918,
"is_parallel": true,
"self": 4.256159947394735,
"children": {
"process_trajectory": {
"total": 167.46870303490778,
"count": 335918,
"is_parallel": true,
"self": 167.0169628799067,
"children": {
"RLTrainer._checkpoint": {
"total": 0.4517401550010618,
"count": 4,
"is_parallel": true,
"self": 0.4517401550010618
}
}
},
"_update_policy": {
"total": 144.65858847599702,
"count": 90,
"is_parallel": true,
"self": 23.708663277975575,
"children": {
"TorchPPOOptimizer.update": {
"total": 120.94992519802145,
"count": 4587,
"is_parallel": true,
"self": 120.94992519802145
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.08978906999982428,
"count": 1,
"self": 0.0005484129997057607,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08924065700011852,
"count": 1,
"self": 0.08924065700011852
}
}
}
}
}
}
}