{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.8763975501060486,
"min": 0.8763975501060486,
"max": 2.8572945594787598,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 8367.84375,
"min": 8367.84375,
"max": 29261.5546875,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.6689414978027344,
"min": 0.2662430703639984,
"max": 2.707690954208374,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 520.443603515625,
"min": 51.65115737915039,
"max": 555.0366821289062,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.10003961869969155,
"min": 0.0905248123767349,
"max": 0.107811702386443,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.4001584747987662,
"min": 0.3620992495069396,
"max": 0.539058511932215,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.12967791397343661,
"min": 0.07576893718522518,
"max": 0.18305932971070918,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.5187116558937465,
"min": 0.3030757487409007,
"max": 0.9152966485535459,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000007e-06,
"min": 8.082097306000007e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400003e-05,
"min": 3.232838922400003e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.104041,
"min": 0.104041,
"max": 0.24594100000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.416164,
"min": 0.416164,
"max": 1.19258,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.004864970599999999,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828000000003,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 26.068181818181817,
"min": 4.0227272727272725,
"max": 26.704545454545453,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1147.0,
"min": 177.0,
"max": 1445.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 26.068181818181817,
"min": 4.0227272727272725,
"max": 26.704545454545453,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1147.0,
"min": 177.0,
"max": 1445.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1685810904",
"python_version": "3.10.11 (main, Apr 5 2023, 14:15:10) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget2 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1685811412"
},
"total": 507.38135558800013,
"count": 1,
"self": 0.4349897910001346,
"children": {
"run_training.setup": {
"total": 0.03756740200014974,
"count": 1,
"self": 0.03756740200014974
},
"TrainerController.start_learning": {
"total": 506.90879839499985,
"count": 1,
"self": 0.6055804119966979,
"children": {
"TrainerController._reset_env": {
"total": 3.5012593080000443,
"count": 1,
"self": 3.5012593080000443
},
"TrainerController.advance": {
"total": 502.6563845340031,
"count": 18202,
"self": 0.26914107899415285,
"children": {
"env_step": {
"total": 502.38724345500896,
"count": 18202,
"self": 388.493982257005,
"children": {
"SubprocessEnvManager._take_step": {
"total": 113.6263938040081,
"count": 18202,
"self": 1.696102391008253,
"children": {
"TorchPolicy.evaluate": {
"total": 111.93029141299985,
"count": 18202,
"self": 111.93029141299985
}
}
},
"workers": {
"total": 0.2668673939958808,
"count": 18202,
"self": 0.0,
"children": {
"worker_root": {
"total": 505.20555716499484,
"count": 18202,
"is_parallel": true,
"self": 259.1568083640068,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0019170760001543385,
"count": 1,
"is_parallel": true,
"self": 0.0005860980002125871,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013309779999417515,
"count": 10,
"is_parallel": true,
"self": 0.0013309779999417515
}
}
},
"UnityEnvironment.step": {
"total": 0.03464088000009724,
"count": 1,
"is_parallel": true,
"self": 0.0005526850000023842,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004172619999280869,
"count": 1,
"is_parallel": true,
"self": 0.0004172619999280869
},
"communicator.exchange": {
"total": 0.031639162000146825,
"count": 1,
"is_parallel": true,
"self": 0.031639162000146825
},
"steps_from_proto": {
"total": 0.002031771000019944,
"count": 1,
"is_parallel": true,
"self": 0.00039471099967158807,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001637060000348356,
"count": 10,
"is_parallel": true,
"self": 0.001637060000348356
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 246.04874880098805,
"count": 18201,
"is_parallel": true,
"self": 9.509541083990825,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.192690856009676,
"count": 18201,
"is_parallel": true,
"self": 5.192690856009676
},
"communicator.exchange": {
"total": 198.39764851198765,
"count": 18201,
"is_parallel": true,
"self": 198.39764851198765
},
"steps_from_proto": {
"total": 32.948868348999895,
"count": 18201,
"is_parallel": true,
"self": 6.371701255972539,
"children": {
"_process_rank_one_or_two_observation": {
"total": 26.577167093027356,
"count": 182010,
"is_parallel": true,
"self": 26.577167093027356
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00013385600004767184,
"count": 1,
"self": 0.00013385600004767184,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 499.283321949938,
"count": 437737,
"is_parallel": true,
"self": 9.527853400910544,
"children": {
"process_trajectory": {
"total": 247.60172004002675,
"count": 437737,
"is_parallel": true,
"self": 246.41495982102674,
"children": {
"RLTrainer._checkpoint": {
"total": 1.186760219000007,
"count": 4,
"is_parallel": true,
"self": 1.186760219000007
}
}
},
"_update_policy": {
"total": 242.1537485090007,
"count": 90,
"is_parallel": true,
"self": 67.51354415201263,
"children": {
"TorchPPOOptimizer.update": {
"total": 174.64020435698808,
"count": 9177,
"is_parallel": true,
"self": 174.64020435698808
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.14544028499994965,
"count": 1,
"self": 0.001037149000012505,
"children": {
"RLTrainer._checkpoint": {
"total": 0.14440313599993715,
"count": 1,
"self": 0.14440313599993715
}
}
}
}
}
}
}