{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.7744948863983154,
"min": 1.7436292171478271,
"max": 3.2957029342651367,
"count": 700
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 35489.8984375,
"min": 29211.130859375,
"max": 105462.40625,
"count": 700
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 54.36666666666667,
"min": 42.8859649122807,
"max": 999.0,
"count": 700
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19572.0,
"min": 16540.0,
"max": 23900.0,
"count": 700
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1622.1232835749888,
"min": 1191.2334403090224,
"max": 1637.8840299324497,
"count": 693
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 291982.191043498,
"min": 2384.361400786099,
"max": 364740.8570305848,
"count": 693
},
"SoccerTwos.Step.mean": {
"value": 6999972.0,
"min": 9970.0,
"max": 6999972.0,
"count": 700
},
"SoccerTwos.Step.sum": {
"value": 6999972.0,
"min": 9970.0,
"max": 6999972.0,
"count": 700
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.07393477857112885,
"min": -0.15651123225688934,
"max": 0.21851611137390137,
"count": 700
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -13.382194519042969,
"min": -24.04785919189453,
"max": 30.810771942138672,
"count": 700
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.07746528089046478,
"min": -0.15395240485668182,
"max": 0.22302763164043427,
"count": 700
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -14.02121639251709,
"min": -24.890514373779297,
"max": 31.446895599365234,
"count": 700
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 700
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 700
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.2283226541392711,
"min": -0.5029379293836397,
"max": 0.45994286239147186,
"count": 700
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -41.32640039920807,
"min": -54.488800287246704,
"max": 54.95440024137497,
"count": 700
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.2283226541392711,
"min": -0.5029379293836397,
"max": 0.45994286239147186,
"count": 700
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -41.32640039920807,
"min": -54.488800287246704,
"max": 54.95440024137497,
"count": 700
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 700
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 700
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.01195489305537194,
"min": 0.011874554418803503,
"max": 0.02438570779049769,
"count": 337
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.01195489305537194,
"min": 0.011874554418803503,
"max": 0.02438570779049769,
"count": 337
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.28759366472562153,
"min": 0.0014461240110297997,
"max": 0.30876916845639546,
"count": 337
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.28759366472562153,
"min": 0.0014461240110297997,
"max": 0.30876916845639546,
"count": 337
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.2907094985246658,
"min": 0.0015270562920098503,
"max": 0.3126795470714569,
"count": 337
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.2907094985246658,
"min": 0.0015270562920098503,
"max": 0.3126795470714569,
"count": 337
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 337
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 337
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 337
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 337
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 337
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 337
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1717833617",
"python_version": "3.10.12 | packaged by Anaconda, Inc. | (main, Jul 5 2023, 19:01:18) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "\\\\?\\C:\\Users\\wte42\\miniconda3\\envs\\rl\\Scripts\\mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos.exe --run-id=Run4 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.3.0+cpu",
"numpy_version": "1.23.5",
"end_time_seconds": "1717882158"
},
"total": 48540.255755900056,
"count": 1,
"self": 1.7802201999584213,
"children": {
"run_training.setup": {
"total": 0.1558619000716135,
"count": 1,
"self": 0.1558619000716135
},
"TrainerController.start_learning": {
"total": 48538.319673800026,
"count": 1,
"self": 17.576804901007563,
"children": {
"TrainerController._reset_env": {
"total": 19.757410200312734,
"count": 35,
"self": 19.757410200312734
},
"TrainerController.advance": {
"total": 48500.71421439876,
"count": 477811,
"self": 15.678254023310728,
"children": {
"env_step": {
"total": 16128.576340737636,
"count": 477811,
"self": 12238.504391544266,
"children": {
"SubprocessEnvManager._take_step": {
"total": 3879.1602508283686,
"count": 477811,
"self": 99.7128643838223,
"children": {
"TorchPolicy.evaluate": {
"total": 3779.4473864445463,
"count": 881388,
"self": 3779.4473864445463
}
}
},
"workers": {
"total": 10.91169836500194,
"count": 477811,
"self": 0.0,
"children": {
"worker_root": {
"total": 48505.63960001629,
"count": 477811,
"is_parallel": true,
"self": 38779.75663036294,
"children": {
"steps_from_proto": {
"total": 0.13417499989736825,
"count": 70,
"is_parallel": true,
"self": 0.02296159928664565,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.1112134006107226,
"count": 280,
"is_parallel": true,
"self": 0.1112134006107226
}
}
},
"UnityEnvironment.step": {
"total": 9725.748794653453,
"count": 477811,
"is_parallel": true,
"self": 478.6078320364468,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 544.177180890576,
"count": 477811,
"is_parallel": true,
"self": 544.177180890576
},
"communicator.exchange": {
"total": 6779.5840207445435,
"count": 477811,
"is_parallel": true,
"self": 6779.5840207445435
},
"steps_from_proto": {
"total": 1923.3797609818866,
"count": 955622,
"is_parallel": true,
"self": 323.68003459542524,
"children": {
"_process_rank_one_or_two_observation": {
"total": 1599.6997263864614,
"count": 3822488,
"is_parallel": true,
"self": 1599.6997263864614
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 32356.45961963781,
"count": 477811,
"self": 119.13579186913557,
"children": {
"process_trajectory": {
"total": 4475.130673869629,
"count": 477811,
"self": 4471.104444069322,
"children": {
"RLTrainer._checkpoint": {
"total": 4.026229800307192,
"count": 14,
"self": 4.026229800307192
}
}
},
"_update_policy": {
"total": 27762.193153899047,
"count": 337,
"self": 2447.938731301343,
"children": {
"TorchPOCAOptimizer.update": {
"total": 25314.254422597704,
"count": 10110,
"self": 25314.254422597704
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.700012944638729e-06,
"count": 1,
"self": 1.700012944638729e-06
},
"TrainerController._save_models": {
"total": 0.2712425999343395,
"count": 1,
"self": 0.048163199913688004,
"children": {
"RLTrainer._checkpoint": {
"total": 0.22307940002065152,
"count": 1,
"self": 0.22307940002065152
}
}
}
}
}
}
}