{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 1.0629947185516357,
"min": 1.0629947185516357,
"max": 2.853997230529785,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 10184.552734375,
"min": 10184.552734375,
"max": 29196.390625,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.772660255432129,
"min": 0.32505810260772705,
"max": 12.772660255432129,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2490.668701171875,
"min": 63.06127166748047,
"max": 2575.4423828125,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06789310773213704,
"min": 0.06398406166619822,
"max": 0.0741560998415248,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.27157243092854816,
"min": 0.26814070572921384,
"max": 0.370780499207624,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.22101247668558477,
"min": 0.12968356727951152,
"max": 0.29254896214195325,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.8840499067423391,
"min": 0.5187342691180461,
"max": 1.4627448107097663,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 24.727272727272727,
"min": 3.3863636363636362,
"max": 25.490909090909092,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1088.0,
"min": 149.0,
"max": 1402.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 24.727272727272727,
"min": 3.3863636363636362,
"max": 25.490909090909092,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1088.0,
"min": 149.0,
"max": 1402.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1686243726",
"python_version": "3.10.11 (main, Apr 5 2023, 14:15:10) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1686244237"
},
"total": 511.11260580399994,
"count": 1,
"self": 0.4240298010004153,
"children": {
"run_training.setup": {
"total": 0.06239056899994466,
"count": 1,
"self": 0.06239056899994466
},
"TrainerController.start_learning": {
"total": 510.6261854339996,
"count": 1,
"self": 0.6536907369654728,
"children": {
"TrainerController._reset_env": {
"total": 4.452576729000157,
"count": 1,
"self": 4.452576729000157
},
"TrainerController.advance": {
"total": 505.38064967303353,
"count": 18206,
"self": 0.3011227710130697,
"children": {
"env_step": {
"total": 505.07952690202046,
"count": 18206,
"self": 365.8526361849749,
"children": {
"SubprocessEnvManager._take_step": {
"total": 138.92391830501765,
"count": 18206,
"self": 2.1226161180361487,
"children": {
"TorchPolicy.evaluate": {
"total": 136.8013021869815,
"count": 18206,
"self": 136.8013021869815
}
}
},
"workers": {
"total": 0.3029724120278843,
"count": 18206,
"self": 0.0,
"children": {
"worker_root": {
"total": 508.704357688001,
"count": 18206,
"is_parallel": true,
"self": 239.90498363299866,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0029386770002020057,
"count": 1,
"is_parallel": true,
"self": 0.0007997269995030365,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0021389500006989692,
"count": 10,
"is_parallel": true,
"self": 0.0021389500006989692
}
}
},
"UnityEnvironment.step": {
"total": 0.03543703600007575,
"count": 1,
"is_parallel": true,
"self": 0.0005874520002180361,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0003256830000282207,
"count": 1,
"is_parallel": true,
"self": 0.0003256830000282207
},
"communicator.exchange": {
"total": 0.03243442999973922,
"count": 1,
"is_parallel": true,
"self": 0.03243442999973922
},
"steps_from_proto": {
"total": 0.0020894710000902705,
"count": 1,
"is_parallel": true,
"self": 0.0003781969999181456,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001711274000172125,
"count": 10,
"is_parallel": true,
"self": 0.001711274000172125
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 268.7993740550023,
"count": 18205,
"is_parallel": true,
"self": 10.983437682008116,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 6.014158724961817,
"count": 18205,
"is_parallel": true,
"self": 6.014158724961817
},
"communicator.exchange": {
"total": 212.79225344901624,
"count": 18205,
"is_parallel": true,
"self": 212.79225344901624
},
"steps_from_proto": {
"total": 39.00952419901614,
"count": 18205,
"is_parallel": true,
"self": 7.171393315178193,
"children": {
"_process_rank_one_or_two_observation": {
"total": 31.838130883837948,
"count": 182050,
"is_parallel": true,
"self": 31.838130883837948
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00015679300031479215,
"count": 1,
"self": 0.00015679300031479215,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 501.3334425659041,
"count": 485821,
"is_parallel": true,
"self": 10.938737156138814,
"children": {
"process_trajectory": {
"total": 275.0488894597638,
"count": 485821,
"is_parallel": true,
"self": 273.68149758576374,
"children": {
"RLTrainer._checkpoint": {
"total": 1.3673918740000772,
"count": 4,
"is_parallel": true,
"self": 1.3673918740000772
}
}
},
"_update_policy": {
"total": 215.34581595000145,
"count": 90,
"is_parallel": true,
"self": 84.47682597000312,
"children": {
"TorchPPOOptimizer.update": {
"total": 130.86898997999833,
"count": 4584,
"is_parallel": true,
"self": 130.86898997999833
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.13911150200010525,
"count": 1,
"self": 0.0008934269999372191,
"children": {
"RLTrainer._checkpoint": {
"total": 0.13821807500016803,
"count": 1,
"self": 0.13821807500016803
}
}
}
}
}
}
}