{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.4788085222244263,
"min": 1.34537935256958,
"max": 3.2956905364990234,
"count": 5000
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 25127.9140625,
"min": 16445.171875,
"max": 145786.125,
"count": 5000
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 64.44155844155844,
"min": 39.942622950819676,
"max": 999.0,
"count": 5000
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19848.0,
"min": 7992.0,
"max": 28332.0,
"count": 5000
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1895.7613020306055,
"min": 1201.592804267964,
"max": 1920.2364514241071,
"count": 4546
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 291947.24051271327,
"min": 2403.84684830682,
"max": 416879.5351197595,
"count": 4546
},
"SoccerTwos.Step.mean": {
"value": 49999916.0,
"min": 9184.0,
"max": 49999916.0,
"count": 5000
},
"SoccerTwos.Step.sum": {
"value": 49999916.0,
"min": 9184.0,
"max": 49999916.0,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": 0.045010194182395935,
"min": -0.15087659657001495,
"max": 0.1586652249097824,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": 6.88655948638916,
"min": -28.36479949951172,
"max": 23.482452392578125,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.04811400547623634,
"min": -0.15700209140777588,
"max": 0.15648241341114044,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": 7.361443042755127,
"min": -29.516393661499023,
"max": 23.17345428466797,
"count": 5000
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 5000
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.20982221996082978,
"min": -0.7889999991113489,
"max": 0.4695555501514011,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 32.10279965400696,
"min": -60.55800008773804,
"max": 59.11360025405884,
"count": 5000
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.20982221996082978,
"min": -0.7889999991113489,
"max": 0.4695555501514011,
"count": 5000
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 32.10279965400696,
"min": -60.55800008773804,
"max": 59.11360025405884,
"count": 5000
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 5000
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 5000
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.018770876314859682,
"min": 0.010020536121252614,
"max": 0.026517125661484897,
"count": 2405
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.018770876314859682,
"min": 0.010020536121252614,
"max": 0.026517125661484897,
"count": 2405
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.09144515817364057,
"min": 3.3547097721964294e-08,
"max": 0.131473595649004,
"count": 2405
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.09144515817364057,
"min": 3.3547097721964294e-08,
"max": 0.131473595649004,
"count": 2405
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.09309406379858652,
"min": 4.3677706192587116e-08,
"max": 0.135417403280735,
"count": 2405
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.09309406379858652,
"min": 4.3677706192587116e-08,
"max": 0.135417403280735,
"count": 2405
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 2405
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 2405
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000004,
"max": 0.20000000000000007,
"count": 2405
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000004,
"max": 0.20000000000000007,
"count": 2405
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 2405
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 2405
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1675620254",
"python_version": "3.10.6 (main, Nov 14 2022, 16:10:14) [GCC 11.3.0]",
"command_line_arguments": "/home/yucjin/hugging_face_rl/unit7/venv/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos/SoccerTwos.x86_64 --run-id=SoccerTwos --no-graphics --force",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1675708861"
},
"total": 88607.92426025501,
"count": 1,
"self": 0.21856925400788896,
"children": {
"run_training.setup": {
"total": 0.008596899999247398,
"count": 1,
"self": 0.008596899999247398
},
"TrainerController.start_learning": {
"total": 88607.69709410101,
"count": 1,
"self": 54.33353889726277,
"children": {
"TrainerController._reset_env": {
"total": 7.953261757964356,
"count": 250,
"self": 7.953261757964356
},
"TrainerController.advance": {
"total": 88545.25049504777,
"count": 3420118,
"self": 50.14558512021904,
"children": {
"env_step": {
"total": 67936.5814669147,
"count": 3420118,
"self": 43416.33377479577,
"children": {
"SubprocessEnvManager._take_step": {
"total": 24488.584984037847,
"count": 3420118,
"self": 288.33730446321715,
"children": {
"TorchPolicy.evaluate": {
"total": 24200.24767957463,
"count": 6313434,
"self": 24200.24767957463
}
}
},
"workers": {
"total": 31.662708081072196,
"count": 3420118,
"self": 0.0,
"children": {
"worker_root": {
"total": 88484.73229627071,
"count": 3420118,
"is_parallel": true,
"self": 50667.68108681378,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.001849100004619686,
"count": 2,
"is_parallel": true,
"self": 0.0005407000026025344,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013084000020171516,
"count": 8,
"is_parallel": true,
"self": 0.0013084000020171516
}
}
},
"UnityEnvironment.step": {
"total": 0.028766398998413933,
"count": 1,
"is_parallel": true,
"self": 0.00045390000013867393,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0003077000001212582,
"count": 1,
"is_parallel": true,
"self": 0.0003077000001212582
},
"communicator.exchange": {
"total": 0.02654359900043346,
"count": 1,
"is_parallel": true,
"self": 0.02654359900043346
},
"steps_from_proto": {
"total": 0.0014611999977205414,
"count": 2,
"is_parallel": true,
"self": 0.00028139999994891696,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0011797999977716245,
"count": 8,
"is_parallel": true,
"self": 0.0011797999977716245
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 37816.752694438124,
"count": 3420117,
"is_parallel": true,
"self": 1171.1702581627032,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 846.923472165432,
"count": 3420117,
"is_parallel": true,
"self": 846.923472165432
},
"communicator.exchange": {
"total": 31938.347590114598,
"count": 3420117,
"is_parallel": true,
"self": 31938.347590114598
},
"steps_from_proto": {
"total": 3860.311373995395,
"count": 6840234,
"is_parallel": true,
"self": 810.0337141514356,
"children": {
"_process_rank_one_or_two_observation": {
"total": 3050.2776598439596,
"count": 27360936,
"is_parallel": true,
"self": 3050.2776598439596
}
}
}
}
},
"steps_from_proto": {
"total": 0.29851501880330034,
"count": 498,
"is_parallel": true,
"self": 0.06205058093837579,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.23646443786492455,
"count": 1992,
"is_parallel": true,
"self": 0.23646443786492455
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 20558.52344301286,
"count": 3420118,
"self": 353.95522478349085,
"children": {
"process_trajectory": {
"total": 10449.847205032387,
"count": 3420118,
"self": 10427.388773734434,
"children": {
"RLTrainer._checkpoint": {
"total": 22.45843129795321,
"count": 100,
"self": 22.45843129795321
}
}
},
"_update_policy": {
"total": 9754.72101319698,
"count": 2405,
"self": 4998.611239978134,
"children": {
"TorchPOCAOptimizer.update": {
"total": 4756.109773218846,
"count": 72159,
"self": 4756.109773218846
}
}
}
}
}
}
},
"trainer_threads": {
"total": 5.00003807246685e-07,
"count": 1,
"self": 5.00003807246685e-07
},
"TrainerController._save_models": {
"total": 0.15979789799894206,
"count": 1,
"self": 0.0011017839860869572,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1586961140128551,
"count": 1,
"self": 0.1586961140128551
}
}
}
}
}
}
}