First Push
c4b8899
{
    "name": "root",
    "gauges": {
        "SnowballTarget.Policy.Entropy.mean": {
            "value": 0.8475733399391174,
            "min": 0.8475733399391174,
            "max": 2.858381748199463,
            "count": 20
        },
        "SnowballTarget.Policy.Entropy.sum": {
            "value": 8092.63037109375,
            "min": 8092.63037109375,
            "max": 29272.6875,
            "count": 20
        },
        "SnowballTarget.Step.mean": {
            "value": 199984.0,
            "min": 9952.0,
            "max": 199984.0,
            "count": 20
        },
        "SnowballTarget.Step.sum": {
            "value": 199984.0,
            "min": 9952.0,
            "max": 199984.0,
            "count": 20
        },
        "SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
            "value": 12.870394706726074,
            "min": 0.29595136642456055,
            "max": 12.870394706726074,
            "count": 20
        },
        "SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
            "value": 2509.72705078125,
            "min": 57.41456604003906,
            "max": 2596.689453125,
            "count": 20
        },
        "SnowballTarget.Environment.EpisodeLength.mean": {
            "value": 199.0,
            "min": 199.0,
            "max": 199.0,
            "count": 20
        },
        "SnowballTarget.Environment.EpisodeLength.sum": {
            "value": 8756.0,
            "min": 8756.0,
            "max": 10945.0,
            "count": 20
        },
        "SnowballTarget.Losses.PolicyLoss.mean": {
            "value": 0.06962247048824222,
            "min": 0.0616809578571359,
            "max": 0.07216842059564192,
            "count": 20
        },
        "SnowballTarget.Losses.PolicyLoss.sum": {
            "value": 0.27848988195296887,
            "min": 0.2534245713939206,
            "max": 0.35667823487892747,
            "count": 20
        },
        "SnowballTarget.Losses.ValueLoss.mean": {
            "value": 0.1961197319860552,
            "min": 0.13874914564475344,
            "max": 0.2968122436719782,
            "count": 20
        },
        "SnowballTarget.Losses.ValueLoss.sum": {
            "value": 0.7844789279442208,
            "min": 0.5549965825790137,
            "max": 1.4840612183598911,
            "count": 20
        },
        "SnowballTarget.Policy.LearningRate.mean": {
            "value": 8.082097306000005e-06,
            "min": 8.082097306000005e-06,
            "max": 0.000291882002706,
            "count": 20
        },
        "SnowballTarget.Policy.LearningRate.sum": {
            "value": 3.232838922400002e-05,
            "min": 3.232838922400002e-05,
            "max": 0.00138516003828,
            "count": 20
        },
        "SnowballTarget.Policy.Epsilon.mean": {
            "value": 0.10269400000000001,
            "min": 0.10269400000000001,
            "max": 0.19729400000000002,
            "count": 20
        },
        "SnowballTarget.Policy.Epsilon.sum": {
            "value": 0.41077600000000003,
            "min": 0.41077600000000003,
            "max": 0.96172,
            "count": 20
        },
        "SnowballTarget.Policy.Beta.mean": {
            "value": 0.0001444306000000001,
            "min": 0.0001444306000000001,
            "max": 0.0048649706,
            "count": 20
        },
        "SnowballTarget.Policy.Beta.sum": {
            "value": 0.0005777224000000004,
            "min": 0.0005777224000000004,
            "max": 0.023089828,
            "count": 20
        },
        "SnowballTarget.Environment.CumulativeReward.mean": {
            "value": 25.454545454545453,
            "min": 4.0227272727272725,
            "max": 25.672727272727272,
            "count": 20
        },
        "SnowballTarget.Environment.CumulativeReward.sum": {
            "value": 1120.0,
            "min": 177.0,
            "max": 1412.0,
            "count": 20
        },
        "SnowballTarget.Policy.ExtrinsicReward.mean": {
            "value": 25.454545454545453,
            "min": 4.0227272727272725,
            "max": 25.672727272727272,
            "count": 20
        },
        "SnowballTarget.Policy.ExtrinsicReward.sum": {
            "value": 1120.0,
            "min": 177.0,
            "max": 1412.0,
            "count": 20
        },
        "SnowballTarget.IsTraining.mean": {
            "value": 1.0,
            "min": 1.0,
            "max": 1.0,
            "count": 20
        },
        "SnowballTarget.IsTraining.sum": {
            "value": 1.0,
            "min": 1.0,
            "max": 1.0,
            "count": 20
        }
    },
    "metadata": {
        "timer_format_version": "0.1.0",
        "start_time_seconds": "1674245681",
        "python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
        "command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
        "mlagents_version": "0.29.0.dev0",
        "mlagents_envs_version": "0.29.0.dev0",
        "communication_protocol_version": "1.5.0",
        "pytorch_version": "1.8.1+cu102",
        "numpy_version": "1.21.6",
        "end_time_seconds": "1674246104"
    },
    "total": 422.8401937629999,
    "count": 1,
    "self": 0.38005003899979783,
    "children": {
        "run_training.setup": {
            "total": 0.1092233510000824,
            "count": 1,
            "self": 0.1092233510000824
        },
        "TrainerController.start_learning": {
            "total": 422.350920373,
            "count": 1,
            "self": 0.5558509550082817,
            "children": {
                "TrainerController._reset_env": {
                    "total": 10.056131315999892,
                    "count": 1,
                    "self": 10.056131315999892
                },
                "TrainerController.advance": {
                    "total": 411.62079338399167,
                    "count": 18201,
                    "self": 0.26466485000082685,
                    "children": {
                        "env_step": {
                            "total": 411.35612853399084,
                            "count": 18201,
                            "self": 270.2560117869739,
                            "children": {
                                "SubprocessEnvManager._take_step": {
                                    "total": 140.83568642400837,
                                    "count": 18201,
                                    "self": 1.3616432970277401,
                                    "children": {
                                        "TorchPolicy.evaluate": {
                                            "total": 139.47404312698063,
                                            "count": 18201,
                                            "self": 31.618180027991457,
                                            "children": {
                                                "TorchPolicy.sample_actions": {
                                                    "total": 107.85586309898918,
                                                    "count": 18201,
                                                    "self": 107.85586309898918
                                                }
                                            }
                                        }
                                    }
                                },
                                "workers": {
                                    "total": 0.2644303230085825,
                                    "count": 18201,
                                    "self": 0.0,
                                    "children": {
                                        "worker_root": {
                                            "total": 421.1164115800168,
                                            "count": 18201,
                                            "is_parallel": true,
                                            "self": 200.93976177103877,
                                            "children": {
                                                "run_training.setup": {
                                                    "total": 0.0,
                                                    "count": 0,
                                                    "is_parallel": true,
                                                    "self": 0.0,
                                                    "children": {
                                                        "steps_from_proto": {
                                                            "total": 0.00680834199988567,
                                                            "count": 1,
                                                            "is_parallel": true,
                                                            "self": 0.004090367999651789,
                                                            "children": {
                                                                "_process_rank_one_or_two_observation": {
                                                                    "total": 0.0027179740002338804,
                                                                    "count": 10,
                                                                    "is_parallel": true,
                                                                    "self": 0.0027179740002338804
                                                                }
                                                            }
                                                        },
                                                        "UnityEnvironment.step": {
                                                            "total": 0.04062657899999067,
                                                            "count": 1,
                                                            "is_parallel": true,
                                                            "self": 0.00033246600014535943,
                                                            "children": {
                                                                "UnityEnvironment._generate_step_input": {
                                                                    "total": 0.0003423949999614706,
                                                                    "count": 1,
                                                                    "is_parallel": true,
                                                                    "self": 0.0003423949999614706
                                                                },
                                                                "communicator.exchange": {
                                                                    "total": 0.03865458099994612,
                                                                    "count": 1,
                                                                    "is_parallel": true,
                                                                    "self": 0.03865458099994612
                                                                },
                                                                "steps_from_proto": {
                                                                    "total": 0.0012971369999377202,
                                                                    "count": 1,
                                                                    "is_parallel": true,
                                                                    "self": 0.00035574500020629785,
                                                                    "children": {
                                                                        "_process_rank_one_or_two_observation": {
                                                                            "total": 0.0009413919997314224,
                                                                            "count": 10,
                                                                            "is_parallel": true,
                                                                            "self": 0.0009413919997314224
                                                                        }
                                                                    }
                                                                }
                                                            }
                                                        }
                                                    }
                                                },
                                                "UnityEnvironment.step": {
                                                    "total": 220.17664980897803,
                                                    "count": 18200,
                                                    "is_parallel": true,
                                                    "self": 8.272574469016035,
                                                    "children": {
                                                        "UnityEnvironment._generate_step_input": {
                                                            "total": 4.849048320984366,
                                                            "count": 18200,
                                                            "is_parallel": true,
                                                            "self": 4.849048320984366
                                                        },
                                                        "communicator.exchange": {
                                                            "total": 175.82576235399415,
                                                            "count": 18200,
                                                            "is_parallel": true,
                                                            "self": 175.82576235399415
                                                        },
                                                        "steps_from_proto": {
                                                            "total": 31.229264664983475,
                                                            "count": 18200,
                                                            "is_parallel": true,
                                                            "self": 6.434977489974472,
                                                            "children": {
                                                                "_process_rank_one_or_two_observation": {
                                                                    "total": 24.794287175009003,
                                                                    "count": 182000,
                                                                    "is_parallel": true,
                                                                    "self": 24.794287175009003
                                                                }
                                                            }
                                                        }
                                                    }
                                                }
                                            }
                                        }
                                    }
                                }
                            }
                        }
                    }
                },
                "trainer_threads": {
                    "total": 4.347100002632942e-05,
                    "count": 1,
                    "self": 4.347100002632942e-05,
                    "children": {
                        "thread_root": {
                            "total": 0.0,
                            "count": 0,
                            "is_parallel": true,
                            "self": 0.0,
                            "children": {
                                "trainer_advance": {
                                    "total": 408.8623029399439,
                                    "count": 342941,
                                    "is_parallel": true,
                                    "self": 8.712061183952528,
                                    "children": {
                                        "process_trajectory": {
                                            "total": 234.8207699329903,
                                            "count": 342941,
                                            "is_parallel": true,
                                            "self": 234.1616743659904,
                                            "children": {
                                                "RLTrainer._checkpoint": {
                                                    "total": 0.6590955669998948,
                                                    "count": 4,
                                                    "is_parallel": true,
                                                    "self": 0.6590955669998948
                                                }
                                            }
                                        },
                                        "_update_policy": {
                                            "total": 165.3294718230011,
                                            "count": 90,
                                            "is_parallel": true,
                                            "self": 41.67163289699124,
                                            "children": {
                                                "TorchPPOOptimizer.update": {
                                                    "total": 123.65783892600984,
                                                    "count": 4587,
                                                    "is_parallel": true,
                                                    "self": 123.65783892600984
                                                }
                                            }
                                        }
                                    }
                                }
                            }
                        }
                    }
                },
                "TrainerController._save_models": {
                    "total": 0.11810124700014057,
                    "count": 1,
                    "self": 0.0011282180003036046,
                    "children": {
                        "RLTrainer._checkpoint": {
                            "total": 0.11697302899983697,
                            "count": 1,
                            "self": 0.11697302899983697
                        }
                    }
                }
            }
        }
    }
}