{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 1.1481189727783203,
"min": 1.1481189727783203,
"max": 2.861619710922241,
"count": 15
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 11821.033203125,
"min": 11163.80078125,
"max": 29400.28125,
"count": 15
},
"SnowballTarget.Step.mean": {
"value": 149984.0,
"min": 9952.0,
"max": 149984.0,
"count": 15
},
"SnowballTarget.Step.sum": {
"value": 149984.0,
"min": 9952.0,
"max": 149984.0,
"count": 15
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 11.836669921875,
"min": 0.33126920461654663,
"max": 11.836669921875,
"count": 15
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2426.517333984375,
"min": 64.26622772216797,
"max": 2426.517333984375,
"count": 15
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 15
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 10945.0,
"min": 8756.0,
"max": 10945.0,
"count": 15
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06101941802463604,
"min": 0.06101941802463604,
"max": 0.07635823901321546,
"count": 15
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.3050970901231802,
"min": 0.24763796339268046,
"max": 0.38179119506607734,
"count": 15
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.21118846477246755,
"min": 0.12578009696467324,
"max": 0.29957225831115947,
"count": 15
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 1.0559423238623378,
"min": 0.503120387858693,
"max": 1.444422147437638,
"count": 15
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 9.776096741333334e-06,
"min": 9.776096741333334e-06,
"max": 0.000289176003608,
"count": 15
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 4.888048370666667e-05,
"min": 4.888048370666667e-05,
"max": 0.0013468800510399999,
"count": 15
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10325866666666668,
"min": 0.10325866666666668,
"max": 0.19639199999999998,
"count": 15
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.5162933333333334,
"min": 0.43943466666666664,
"max": 0.94896,
"count": 15
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.00017260746666666673,
"min": 0.00017260746666666673,
"max": 0.004819960799999999,
"count": 15
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0008630373333333337,
"min": 0.0008630373333333337,
"max": 0.022453104,
"count": 15
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 23.8,
"min": 3.727272727272727,
"max": 23.8,
"count": 15
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1309.0,
"min": 164.0,
"max": 1309.0,
"count": 15
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 23.8,
"min": 3.727272727272727,
"max": 23.8,
"count": 15
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1309.0,
"min": 164.0,
"max": 1309.0,
"count": 15
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 15
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 15
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1711911374",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1711911930"
},
"total": 555.7844792319997,
"count": 1,
"self": 0.6003958419996707,
"children": {
"run_training.setup": {
"total": 0.11658476200000223,
"count": 1,
"self": 0.11658476200000223
},
"TrainerController.start_learning": {
"total": 555.0674986280001,
"count": 1,
"self": 0.7470609850099663,
"children": {
"TrainerController._reset_env": {
"total": 3.883263278999948,
"count": 1,
"self": 3.883263278999948
},
"TrainerController.advance": {
"total": 550.2896984419901,
"count": 13674,
"self": 0.41436715002873825,
"children": {
"env_step": {
"total": 549.8753312919614,
"count": 13674,
"self": 435.4152724129751,
"children": {
"SubprocessEnvManager._take_step": {
"total": 114.08179725697619,
"count": 13674,
"self": 2.2440220739729284,
"children": {
"TorchPolicy.evaluate": {
"total": 111.83777518300326,
"count": 13674,
"self": 111.83777518300326
}
}
},
"workers": {
"total": 0.3782616220100863,
"count": 13674,
"self": 0.0,
"children": {
"worker_root": {
"total": 553.2548403970068,
"count": 13674,
"is_parallel": true,
"self": 285.39852107002093,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.008766864000108399,
"count": 1,
"is_parallel": true,
"self": 0.005949198000053002,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0028176660000553966,
"count": 10,
"is_parallel": true,
"self": 0.0028176660000553966
}
}
},
"UnityEnvironment.step": {
"total": 0.049924123999971926,
"count": 1,
"is_parallel": true,
"self": 0.0009013160001813958,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005103579999286012,
"count": 1,
"is_parallel": true,
"self": 0.0005103579999286012
},
"communicator.exchange": {
"total": 0.045988496000063606,
"count": 1,
"is_parallel": true,
"self": 0.045988496000063606
},
"steps_from_proto": {
"total": 0.002523953999798323,
"count": 1,
"is_parallel": true,
"self": 0.0004722830003629497,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0020516709994353732,
"count": 10,
"is_parallel": true,
"self": 0.0020516709994353732
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 267.85631932698584,
"count": 13673,
"is_parallel": true,
"self": 13.0659507229484,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 6.4238631450105,
"count": 13673,
"is_parallel": true,
"self": 6.4238631450105
},
"communicator.exchange": {
"total": 210.7943007400122,
"count": 13673,
"is_parallel": true,
"self": 210.7943007400122
},
"steps_from_proto": {
"total": 37.572204719014735,
"count": 13673,
"is_parallel": true,
"self": 7.673211380999419,
"children": {
"_process_rank_one_or_two_observation": {
"total": 29.898993338015316,
"count": 136730,
"is_parallel": true,
"self": 29.898993338015316
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.0002443739999762329,
"count": 1,
"self": 0.0002443739999762329,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 544.3128802960612,
"count": 686330,
"is_parallel": true,
"self": 15.395007992722185,
"children": {
"process_trajectory": {
"total": 271.8564952943407,
"count": 686330,
"is_parallel": true,
"self": 271.18958128134045,
"children": {
"RLTrainer._checkpoint": {
"total": 0.6669140130002233,
"count": 3,
"is_parallel": true,
"self": 0.6669140130002233
}
}
},
"_update_policy": {
"total": 257.0613770089983,
"count": 68,
"is_parallel": true,
"self": 48.12139923899872,
"children": {
"TorchPPOOptimizer.update": {
"total": 208.9399777699996,
"count": 3465,
"is_parallel": true,
"self": 208.9399777699996
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.14723154800003613,
"count": 1,
"self": 0.003154880000238336,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1440766679997978,
"count": 1,
"self": 0.1440766679997978
}
}
}
}
}
}
}