Golobama's picture
First Push
ee796a3 verified
{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": -3.576278402306343e-07,
"min": -3.576278402306343e-07,
"max": 2.6939635276794434,
"count": 836
},
"SoccerTwos.Policy.Entropy.sum": {
"value": -0.005069732200354338,
"min": -0.013641356490552425,
"max": 84429.03125,
"count": 836
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 999.0,
"min": 459.3333333333333,
"max": 999.0,
"count": 836
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19980.0,
"min": 11024.0,
"max": 31704.0,
"count": 836
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1156.9462840677766,
"min": 1156.9462840677766,
"max": 1200.0797527907453,
"count": 63
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 4627.785136271106,
"min": 2320.753618686611,
"max": 11989.95732238514,
"count": 63
},
"SoccerTwos.Step.mean": {
"value": 8359860.0,
"min": 9522.0,
"max": 8359860.0,
"count": 836
},
"SoccerTwos.Step.sum": {
"value": 8359860.0,
"min": 9522.0,
"max": 8359860.0,
"count": 836
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.5491425395011902,
"min": -13112.5654296875,
"max": 8993.1416015625,
"count": 836
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -5.491425514221191,
"min": -200188.71875,
"max": 89931.4140625,
"count": 836
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.5343278050422668,
"min": -12266.4697265625,
"max": 8313.759765625,
"count": 836
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -5.343277931213379,
"min": -191058.328125,
"max": 83137.59375,
"count": 836
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 836
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 836
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.0,
"min": -0.5333333333333333,
"max": 0.13266666730244955,
"count": 836
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 0.0,
"min": -8.0,
"max": 1.5920000076293945,
"count": 836
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.0,
"min": -0.5333333333333333,
"max": 0.13266666730244955,
"count": 836
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 0.0,
"min": -8.0,
"max": 1.5920000076293945,
"count": 836
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 836
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 836
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.018108418878788748,
"min": 0.010324261578110358,
"max": 0.3402867207924525,
"count": 382
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.018108418878788748,
"min": 0.010324261578110358,
"max": 0.3402867207924525,
"count": 382
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 170.31988499959309,
"min": 0.022363464161753655,
"max": 148730041.06666666,
"count": 382
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 170.31988499959309,
"min": 0.022363464161753655,
"max": 148730041.06666666,
"count": 382
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 176.16862080891926,
"min": 0.022276258375495673,
"max": 157164864.53333333,
"count": 382
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 176.16862080891926,
"min": 0.022276258375495673,
"max": 157164864.53333333,
"count": 382
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0006269649854335752,
"min": 0.0006269649854335752,
"max": 0.0006998200650257049,
"count": 382
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0006269649854335752,
"min": 0.0006269649854335752,
"max": 0.0006998200650257049,
"count": 382
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.18956642499999996,
"min": 0.18956642499999996,
"max": 0.19997429499999994,
"count": 382
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.18956642499999996,
"min": 0.18956642499999996,
"max": 0.19997429499999994,
"count": 382
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.0044793646074999995,
"min": 0.0044793646074999995,
"max": 0.0049987173205,
"count": 382
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.0044793646074999995,
"min": 0.0044793646074999995,
"max": 0.0049987173205,
"count": 382
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1723398286",
"python_version": "3.10.12 | packaged by Anaconda, Inc. | (main, Jul 5 2023, 19:01:18) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "\\\\?\\C:\\Users\\manug\\miniconda3\\envs\\rl\\Scripts\\mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos.exe --run-id=SoccerTwos --no-graphics --force",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.4.0+cpu",
"numpy_version": "1.23.5",
"end_time_seconds": "1723479190"
},
"total": 80903.70793779999,
"count": 1,
"self": 2.069925699994201,
"children": {
"run_training.setup": {
"total": 0.15105419999963487,
"count": 1,
"self": 0.15105419999963487
},
"TrainerController.start_learning": {
"total": 80901.4869579,
"count": 1,
"self": 20.6864982972329,
"children": {
"TrainerController._reset_env": {
"total": 11.717578799991315,
"count": 42,
"self": 11.717578799991315
},
"TrainerController.advance": {
"total": 80868.78792820277,
"count": 544781,
"self": 20.808423901471542,
"children": {
"env_step": {
"total": 17379.407285900703,
"count": 544781,
"self": 11370.672526601464,
"children": {
"SubprocessEnvManager._take_step": {
"total": 5995.216470601548,
"count": 544781,
"self": 144.72061600046527,
"children": {
"TorchPolicy.evaluate": {
"total": 5850.495854601083,
"count": 1081688,
"self": 5850.495854601083
}
}
},
"workers": {
"total": 13.518288697690878,
"count": 544780,
"self": 0.0,
"children": {
"worker_root": {
"total": 80868.26328919677,
"count": 544780,
"is_parallel": true,
"self": 72190.79113669817,
"children": {
"steps_from_proto": {
"total": 0.10491680002451176,
"count": 84,
"is_parallel": true,
"self": 0.021791200074403605,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.08312559995010815,
"count": 336,
"is_parallel": true,
"self": 0.08312559995010815
}
}
},
"UnityEnvironment.step": {
"total": 8677.367235698575,
"count": 544780,
"is_parallel": true,
"self": 525.3884943943776,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 504.18722250334486,
"count": 544780,
"is_parallel": true,
"self": 504.18722250334486
},
"communicator.exchange": {
"total": 5963.867111395936,
"count": 544780,
"is_parallel": true,
"self": 5963.867111395936
},
"steps_from_proto": {
"total": 1683.9244074049166,
"count": 1089560,
"is_parallel": true,
"self": 345.08360731093126,
"children": {
"_process_rank_one_or_two_observation": {
"total": 1338.8408000939853,
"count": 4358240,
"is_parallel": true,
"self": 1338.8408000939853
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 63468.57221840059,
"count": 544780,
"self": 151.92435729738645,
"children": {
"process_trajectory": {
"total": 7092.484537903309,
"count": 544780,
"self": 7088.646304903301,
"children": {
"RLTrainer._checkpoint": {
"total": 3.8382330000076763,
"count": 16,
"self": 3.8382330000076763
}
}
},
"_update_policy": {
"total": 56224.1633231999,
"count": 382,
"self": 1967.5510151998606,
"children": {
"TorchPOCAOptimizer.update": {
"total": 54256.61230800004,
"count": 11469,
"self": 54256.61230800004
}
}
}
}
}
}
},
"trainer_threads": {
"total": 2.0000006770715117e-06,
"count": 1,
"self": 2.0000006770715117e-06
},
"TrainerController._save_models": {
"total": 0.2949506000004476,
"count": 1,
"self": 0.01603400000021793,
"children": {
"RLTrainer._checkpoint": {
"total": 0.27891660000022966,
"count": 1,
"self": 0.27891660000022966
}
}
}
}
}
}
}