poca-SoccerTwos / run_logs / timers.json
{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 3.218026876449585,
"min": 3.2003042697906494,
"max": 3.2957470417022705,
"count": 50
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 43662.1875,
"min": 30325.921875,
"max": 105463.90625,
"count": 50
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 963.6,
"min": 443.5,
"max": 999.0,
"count": 50
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19272.0,
"min": 16416.0,
"max": 25480.0,
"count": 50
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1190.5746398791896,
"min": 1188.4673514083206,
"max": 1199.1445113828395,
"count": 43
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 2381.149279758379,
"min": 2376.934702816641,
"max": 14364.40571763741,
"count": 43
},
"SoccerTwos.Step.mean": {
"value": 499572.0,
"min": 9758.0,
"max": 499572.0,
"count": 50
},
"SoccerTwos.Step.sum": {
"value": 499572.0,
"min": 9758.0,
"max": 499572.0,
"count": 50
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.00011170967627549544,
"min": -0.0051085129380226135,
"max": 0.09762725234031677,
"count": 50
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -0.0011170967482030392,
"min": -0.06130215525627136,
"max": 1.4644087553024292,
"count": 50
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.00048490139306522906,
"min": -0.004349739756435156,
"max": 0.09764755517244339,
"count": 50
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -0.00484901387244463,
"min": -0.05219687893986702,
"max": 1.4647133350372314,
"count": 50
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 50
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 50
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.0,
"min": -0.6662666666631897,
"max": 0.2737714307648795,
"count": 50
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 0.0,
"min": -9.993999999947846,
"max": 3.832800030708313,
"count": 50
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.0,
"min": -0.6662666666631897,
"max": 0.2737714307648795,
"count": 50
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 0.0,
"min": -9.993999999947846,
"max": 3.832800030708313,
"count": 50
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 50
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 50
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.018008682879250653,
"min": 0.011077817843276232,
"max": 0.020610303218321253,
"count": 23
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.018008682879250653,
"min": 0.011077817843276232,
"max": 0.020610303218321253,
"count": 23
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.0008591158400425532,
"min": 0.0008100116940719696,
"max": 0.006901469205816587,
"count": 23
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.0008591158400425532,
"min": 0.0008100116940719696,
"max": 0.006901469205816587,
"count": 23
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.0008535984105643972,
"min": 0.0008109512787389879,
"max": 0.006317682222773632,
"count": 23
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.0008535984105643972,
"min": 0.0008109512787389879,
"max": 0.006317682222773632,
"count": 23
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 23
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 23
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 23
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 23
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 23
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 23
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1711121568",
"python_version": "3.10.12 | packaged by Anaconda, Inc. | (main, Jul 5 2023, 19:01:18) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "\\\\?\\C:\\Users\\Computer\\anaconda3\\envs\\rl\\Scripts\\mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos.exe --run-id=SoccerTwos --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.1+cpu",
"numpy_version": "1.23.5",
"end_time_seconds": "1711126118"
},
"total": 4549.7723790999735,
"count": 1,
"self": 0.6771422999445349,
"children": {
"run_training.setup": {
"total": 0.37549200002104044,
"count": 1,
"self": 0.37549200002104044
},
"TrainerController.start_learning": {
"total": 4548.719744800008,
"count": 1,
"self": 1.8136510997428559,
"children": {
"TrainerController._reset_env": {
"total": 13.482682200032286,
"count": 3,
"self": 13.482682200032286
},
"TrainerController.advance": {
"total": 4533.081959500152,
"count": 32810,
"self": 2.014280197559856,
"children": {
"env_step": {
"total": 1575.299528304895,
"count": 32810,
"self": 1171.2253187116585,
"children": {
"SubprocessEnvManager._take_step": {
"total": 402.9909484978998,
"count": 32810,
"self": 11.82861390087055,
"children": {
"TorchPolicy.evaluate": {
"total": 391.16233459702926,
"count": 65152,
"self": 391.16233459702926
}
}
},
"workers": {
"total": 1.0832610953366384,
"count": 32810,
"self": 0.0,
"children": {
"worker_root": {
"total": 4534.897381497431,
"count": 32810,
"is_parallel": true,
"self": 3634.059834805026,
"children": {
"steps_from_proto": {
"total": 0.01537760003702715,
"count": 6,
"is_parallel": true,
"self": 0.0033713001757860184,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.012006299861241132,
"count": 24,
"is_parallel": true,
"self": 0.012006299861241132
}
}
},
"UnityEnvironment.step": {
"total": 900.8221690923674,
"count": 32810,
"is_parallel": true,
"self": 43.885291585174855,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 45.92007690249011,
"count": 32810,
"is_parallel": true,
"self": 45.92007690249011
},
"communicator.exchange": {
"total": 660.5651690016384,
"count": 32810,
"is_parallel": true,
"self": 660.5651690016384
},
"steps_from_proto": {
"total": 150.45163160306402,
"count": 65620,
"is_parallel": true,
"self": 29.361153592239134,
"children": {
"_process_rank_one_or_two_observation": {
"total": 121.09047801082488,
"count": 262480,
"is_parallel": true,
"self": 121.09047801082488
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 2955.7681509976974,
"count": 32810,
"self": 11.327981101931073,
"children": {
"process_trajectory": {
"total": 368.7541753957048,
"count": 32810,
"self": 368.10931289568543,
"children": {
"RLTrainer._checkpoint": {
"total": 0.6448625000193715,
"count": 1,
"self": 0.6448625000193715
}
}
},
"_update_policy": {
"total": 2575.6859945000615,
"count": 23,
"self": 236.94466579932487,
"children": {
"TorchPOCAOptimizer.update": {
"total": 2338.7413287007366,
"count": 690,
"self": 2338.7413287007366
}
}
}
}
}
}
},
"trainer_threads": {
"total": 4.300032742321491e-06,
"count": 1,
"self": 4.300032742321491e-06
},
"TrainerController._save_models": {
"total": 0.3414477000478655,
"count": 1,
"self": 0.017190900049172342,
"children": {
"RLTrainer._checkpoint": {
"total": 0.32425679999869317,
"count": 1,
"self": 0.32425679999869317
}
}
}
}
}
}
}