{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 1.1287016868591309,
"min": 1.1287016868591309,
"max": 2.860299587249756,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 10789.259765625,
"min": 10789.259765625,
"max": 29386.71875,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.56136417388916,
"min": 0.42831405997276306,
"max": 12.56136417388916,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2449.466064453125,
"min": 83.09292602539062,
"max": 2518.42626953125,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.07234614758700071,
"min": 0.06346464154447341,
"max": 0.07399849729685515,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.28938459034800285,
"min": 0.25385856617789365,
"max": 0.36443283581634295,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.19744129559280826,
"min": 0.12063336572564189,
"max": 0.29417016452141836,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.789765182371233,
"min": 0.48253346290256754,
"max": 1.411202630283786,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 24.568181818181817,
"min": 3.25,
"max": 24.763636363636362,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1081.0,
"min": 143.0,
"max": 1362.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 24.568181818181817,
"min": 3.25,
"max": 24.763636363636362,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1081.0,
"min": 143.0,
"max": 1362.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1679443947",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1679444433"
},
"total": 485.99730680799996,
"count": 1,
"self": 0.4330716609999854,
"children": {
"run_training.setup": {
"total": 0.1032072200000016,
"count": 1,
"self": 0.1032072200000016
},
"TrainerController.start_learning": {
"total": 485.461027927,
"count": 1,
"self": 0.6317883499957588,
"children": {
"TrainerController._reset_env": {
"total": 9.17120914200001,
"count": 1,
"self": 9.17120914200001
},
"TrainerController.advance": {
"total": 475.5222376590042,
"count": 18203,
"self": 0.3480923360105521,
"children": {
"env_step": {
"total": 475.17414532299364,
"count": 18203,
"self": 344.50612449499175,
"children": {
"SubprocessEnvManager._take_step": {
"total": 130.3560956640016,
"count": 18203,
"self": 2.261910032988567,
"children": {
"TorchPolicy.evaluate": {
"total": 128.09418563101303,
"count": 18203,
"self": 128.09418563101303
}
}
},
"workers": {
"total": 0.3119251640002858,
"count": 18203,
"self": 0.0,
"children": {
"worker_root": {
"total": 483.73482235600386,
"count": 18203,
"is_parallel": true,
"self": 225.47895240000406,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.004748722999977417,
"count": 1,
"is_parallel": true,
"self": 0.0033297160000529402,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001419006999924477,
"count": 10,
"is_parallel": true,
"self": 0.001419006999924477
}
}
},
"UnityEnvironment.step": {
"total": 0.04288157499996714,
"count": 1,
"is_parallel": true,
"self": 0.00037039699998331344,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0002945269999941047,
"count": 1,
"is_parallel": true,
"self": 0.0002945269999941047
},
"communicator.exchange": {
"total": 0.040340876000016124,
"count": 1,
"is_parallel": true,
"self": 0.040340876000016124
},
"steps_from_proto": {
"total": 0.0018757749999736006,
"count": 1,
"is_parallel": true,
"self": 0.00037448599994149845,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015012890000321022,
"count": 10,
"is_parallel": true,
"self": 0.0015012890000321022
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 258.2558699559998,
"count": 18202,
"is_parallel": true,
"self": 9.824119722009641,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.350957151003513,
"count": 18202,
"is_parallel": true,
"self": 5.350957151003513
},
"communicator.exchange": {
"total": 210.47536818398862,
"count": 18202,
"is_parallel": true,
"self": 210.47536818398862
},
"steps_from_proto": {
"total": 32.605424898998024,
"count": 18202,
"is_parallel": true,
"self": 6.73990881399385,
"children": {
"_process_rank_one_or_two_observation": {
"total": 25.865516085004174,
"count": 182020,
"is_parallel": true,
"self": 25.865516085004174
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00015246600003138155,
"count": 1,
"self": 0.00015246600003138155,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 471.8537700099765,
"count": 439635,
"is_parallel": true,
"self": 10.508469179973986,
"children": {
"process_trajectory": {
"total": 263.73302482300255,
"count": 439635,
"is_parallel": true,
"self": 262.8425930390026,
"children": {
"RLTrainer._checkpoint": {
"total": 0.8904317839999294,
"count": 4,
"is_parallel": true,
"self": 0.8904317839999294
}
}
},
"_update_policy": {
"total": 197.61227600699993,
"count": 90,
"is_parallel": true,
"self": 69.84085060100011,
"children": {
"TorchPPOOptimizer.update": {
"total": 127.77142540599982,
"count": 4587,
"is_parallel": true,
"self": 127.77142540599982
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.13564030999998522,
"count": 1,
"self": 0.0008952059999955964,
"children": {
"RLTrainer._checkpoint": {
"total": 0.13474510399998962,
"count": 1,
"self": 0.13474510399998962
}
}
}
}
}
}
}