{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.556477665901184,
"min": 1.4710997343063354,
"max": 3.2958054542541504,
"count": 1004
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 62707.37109375,
"min": 57961.21484375,
"max": 196482.734375,
"count": 1004
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 63.53896103896104,
"min": 41.633620689655174,
"max": 991.0,
"count": 1004
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 39140.0,
"min": 36508.0,
"max": 44136.0,
"count": 1004
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1732.9269825569081,
"min": 1200.3452679053712,
"max": 1774.4282803625763,
"count": 1004
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 533741.5106275277,
"min": 2409.6247172591857,
"max": 807856.6717437465,
"count": 1004
},
"SoccerTwos.Step.mean": {
"value": 20079978.0,
"min": 19958.0,
"max": 20079978.0,
"count": 1004
},
"SoccerTwos.Step.sum": {
"value": 20079978.0,
"min": 19958.0,
"max": 20079978.0,
"count": 1004
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": 0.004693327471613884,
"min": -0.11002453416585922,
"max": 0.28316113352775574,
"count": 1004
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": 1.4502382278442383,
"min": -30.891870498657227,
"max": 87.77995300292969,
"count": 1004
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.0017897997749969363,
"min": -0.10839186608791351,
"max": 0.28043779730796814,
"count": 1004
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": 0.5530481338500977,
"min": -31.57282066345215,
"max": 86.93571472167969,
"count": 1004
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 1004
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 1004
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.07618511224641769,
"min": -0.3861116267913996,
"max": 0.7115907681293977,
"count": 1004
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 23.541199684143066,
"min": -86.69039988517761,
"max": 151.60579979419708,
"count": 1004
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.07618511224641769,
"min": -0.3861116267913996,
"max": 0.7115907681293977,
"count": 1004
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 23.541199684143066,
"min": -86.69039988517761,
"max": 151.60579979419708,
"count": 1004
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1004
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1004
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.01931103996466845,
"min": 0.011364540978199026,
"max": 0.02613362636960422,
"count": 972
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.01931103996466845,
"min": 0.011364540978199026,
"max": 0.02613362636960422,
"count": 972
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.10091944734255473,
"min": 0.0017891564697492869,
"max": 0.1364498645067215,
"count": 972
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.10091944734255473,
"min": 0.0017891564697492869,
"max": 0.1364498645067215,
"count": 972
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.10318002626299858,
"min": 0.0017857251921668648,
"max": 0.1403713325659434,
"count": 972
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.10318002626299858,
"min": 0.0017857251921668648,
"max": 0.1403713325659434,
"count": 972
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 972
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 972
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.19999999999999996,
"max": 0.20000000000000007,
"count": 972
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.19999999999999996,
"max": 0.20000000000000007,
"count": 972
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 972
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 972
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1675383867",
"python_version": "3.8.16 (default, Jan 17 2023, 23:13:24) \n[GCC 11.2.0]",
"command_line_arguments": "/home/lakoc/anaconda3/envs/rl/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos.x86_64 --run-id=SoccerTwos --no-graphics --force",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1675426774"
},
"total": 42906.562987696,
"count": 1,
"self": 0.059106691005581524,
"children": {
"run_training.setup": {
"total": 0.0076704550001522875,
"count": 1,
"self": 0.0076704550001522875
},
"TrainerController.start_learning": {
"total": 42906.496210549994,
"count": 1,
"self": 31.493490309010667,
"children": {
"TrainerController._reset_env": {
"total": 3.7668858350125447,
"count": 41,
"self": 3.7668858350125447
},
"TrainerController.advance": {
"total": 42870.95981721497,
"count": 1379994,
"self": 32.439994623477105,
"children": {
"env_step": {
"total": 27018.915252399358,
"count": 1379994,
"self": 21046.866747180986,
"children": {
"SubprocessEnvManager._take_step": {
"total": 5952.585262612341,
"count": 1379994,
"self": 161.70714837239848,
"children": {
"TorchPolicy.evaluate": {
"total": 5790.878114239942,
"count": 2515149,
"self": 5790.878114239942
}
}
},
"workers": {
"total": 19.4632426060316,
"count": 1379993,
"self": 0.0,
"children": {
"worker_root": {
"total": 42851.82584652449,
"count": 1379993,
"is_parallel": true,
"self": 25313.08857872712,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0036851349996140925,
"count": 2,
"is_parallel": true,
"self": 0.0008368140006496105,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002848320998964482,
"count": 8,
"is_parallel": true,
"self": 0.002848320998964482
}
}
},
"UnityEnvironment.step": {
"total": 0.02670044400019833,
"count": 1,
"is_parallel": true,
"self": 0.0005669430011039367,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005311969998729182,
"count": 1,
"is_parallel": true,
"self": 0.0005311969998729182
},
"communicator.exchange": {
"total": 0.023805957000149647,
"count": 1,
"is_parallel": true,
"self": 0.023805957000149647
},
"steps_from_proto": {
"total": 0.0017963469990718295,
"count": 2,
"is_parallel": true,
"self": 0.0003687569997055107,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014275899993663188,
"count": 8,
"is_parallel": true,
"self": 0.0014275899993663188
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 17538.654487297365,
"count": 1379992,
"is_parallel": true,
"self": 926.6944915361855,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 594.502346487754,
"count": 1379992,
"is_parallel": true,
"self": 594.502346487754
},
"communicator.exchange": {
"total": 13301.6280554498,
"count": 1379992,
"is_parallel": true,
"self": 13301.6280554498
},
"steps_from_proto": {
"total": 2715.8295938236242,
"count": 2759984,
"is_parallel": true,
"self": 493.823149779153,
"children": {
"_process_rank_one_or_two_observation": {
"total": 2222.006444044471,
"count": 11039936,
"is_parallel": true,
"self": 2222.006444044471
}
}
}
}
},
"steps_from_proto": {
"total": 0.08278050000080839,
"count": 80,
"is_parallel": true,
"self": 0.014847281085167197,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.06793321891564119,
"count": 320,
"is_parallel": true,
"self": 0.06793321891564119
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 15819.604570192136,
"count": 1379993,
"self": 196.98733306854228,
"children": {
"process_trajectory": {
"total": 4098.746716667649,
"count": 1379993,
"self": 4088.766313262644,
"children": {
"RLTrainer._checkpoint": {
"total": 9.980403405004836,
"count": 40,
"self": 9.980403405004836
}
}
},
"_update_policy": {
"total": 11523.870520455945,
"count": 973,
"self": 2655.3542703912135,
"children": {
"TorchPOCAOptimizer.update": {
"total": 8868.516250064731,
"count": 29202,
"self": 8868.516250064731
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.4309989637695253e-06,
"count": 1,
"self": 1.4309989637695253e-06
},
"TrainerController._save_models": {
"total": 0.27601575999869965,
"count": 1,
"self": 0.003253094997489825,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2727626650012098,
"count": 1,
"self": 0.2727626650012098
}
}
}
}
}
}
}