{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.9198384881019592,
"min": 0.9198384881019592,
"max": 2.8690733909606934,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 8782.6181640625,
"min": 8782.6181640625,
"max": 29382.1796875,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.942930221557617,
"min": 0.2794888913631439,
"max": 12.942930221557617,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2523.871337890625,
"min": 54.22084426879883,
"max": 2602.950927734375,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.07495134262574663,
"min": 0.06396080380940225,
"max": 0.07495134262574663,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2998053705029865,
"min": 0.2612323227476023,
"max": 0.3570865439536397,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.19461233424497584,
"min": 0.09276995781179079,
"max": 0.27108435223207755,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.7784493369799034,
"min": 0.37107983124716315,
"max": 1.3373788665322697,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.90909090909091,
"min": 2.8636363636363638,
"max": 25.90909090909091,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1140.0,
"min": 126.0,
"max": 1419.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.90909090909091,
"min": 2.8636363636363638,
"max": 25.90909090909091,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1140.0,
"min": 126.0,
"max": 1419.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1678034019",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.22.4",
"end_time_seconds": "1678034466"
},
"total": 447.13418132699996,
"count": 1,
"self": 0.3922857649998832,
"children": {
"run_training.setup": {
"total": 0.11099902200004408,
"count": 1,
"self": 0.11099902200004408
},
"TrainerController.start_learning": {
"total": 446.63089654000004,
"count": 1,
"self": 0.5251913389913057,
"children": {
"TrainerController._reset_env": {
"total": 8.872696229000042,
"count": 1,
"self": 8.872696229000042
},
"TrainerController.advance": {
"total": 437.11336933200863,
"count": 18202,
"self": 0.26747176200314016,
"children": {
"env_step": {
"total": 436.8458975700055,
"count": 18202,
"self": 300.4721887299975,
"children": {
"SubprocessEnvManager._take_step": {
"total": 136.11361653800185,
"count": 18202,
"self": 1.4448021290010615,
"children": {
"TorchPolicy.evaluate": {
"total": 134.6688144090008,
"count": 18202,
"self": 30.25734530999665,
"children": {
"TorchPolicy.sample_actions": {
"total": 104.41146909900414,
"count": 18202,
"self": 104.41146909900414
}
}
}
}
},
"workers": {
"total": 0.26009230200611455,
"count": 18202,
"self": 0.0,
"children": {
"worker_root": {
"total": 445.2543237829973,
"count": 18202,
"is_parallel": true,
"self": 215.57725545598845,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0051117600000338825,
"count": 1,
"is_parallel": true,
"self": 0.0037295900000344773,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013821699999994053,
"count": 10,
"is_parallel": true,
"self": 0.0013821699999994053
}
}
},
"UnityEnvironment.step": {
"total": 0.0380423479999763,
"count": 1,
"is_parallel": true,
"self": 0.0003937429999609776,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00029105900000558904,
"count": 1,
"is_parallel": true,
"self": 0.00029105900000558904
},
"communicator.exchange": {
"total": 0.035698968000019704,
"count": 1,
"is_parallel": true,
"self": 0.035698968000019704
},
"steps_from_proto": {
"total": 0.0016585779999900296,
"count": 1,
"is_parallel": true,
"self": 0.0003809189998946749,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012776590000953547,
"count": 10,
"is_parallel": true,
"self": 0.0012776590000953547
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 229.67706832700884,
"count": 18201,
"is_parallel": true,
"self": 9.40379746600297,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.165627087002065,
"count": 18201,
"is_parallel": true,
"self": 5.165627087002065
},
"communicator.exchange": {
"total": 184.95797234699586,
"count": 18201,
"is_parallel": true,
"self": 184.95797234699586
},
"steps_from_proto": {
"total": 30.149671427007945,
"count": 18201,
"is_parallel": true,
"self": 6.511740755001256,
"children": {
"_process_rank_one_or_two_observation": {
"total": 23.63793067200669,
"count": 182010,
"is_parallel": true,
"self": 23.63793067200669
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00011487400001897186,
"count": 1,
"self": 0.00011487400001897186,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 433.86321190902623,
"count": 389324,
"is_parallel": true,
"self": 9.85751160304693,
"children": {
"process_trajectory": {
"total": 249.72059568097882,
"count": 389324,
"is_parallel": true,
"self": 248.58403401897874,
"children": {
"RLTrainer._checkpoint": {
"total": 1.1365616620000765,
"count": 4,
"is_parallel": true,
"self": 1.1365616620000765
}
}
},
"_update_policy": {
"total": 174.28510462500049,
"count": 90,
"is_parallel": true,
"self": 60.78995237999902,
"children": {
"TorchPPOOptimizer.update": {
"total": 113.49515224500146,
"count": 4587,
"is_parallel": true,
"self": 113.49515224500146
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.11952476600004047,
"count": 1,
"self": 0.0008330489999934798,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11869171700004699,
"count": 1,
"self": 0.11869171700004699
}
}
}
}
}
}
}