{ "name": "root", "gauges": { "SoccerTwos.Policy.Entropy.mean": { "value": 1.3549913167953491, "min": 1.307385802268982, "max": 3.1951282024383545, "count": 4581 }, "SoccerTwos.Policy.Entropy.sum": { "value": 26969.74609375, "min": 5801.3173828125, "max": 105460.5703125, "count": 4581 }, "SoccerTwos.Environment.EpisodeLength.mean": { "value": 69.11267605633803, "min": 39.65573770491803, "max": 978.5, "count": 4581 }, "SoccerTwos.Environment.EpisodeLength.sum": { "value": 19628.0, "min": 380.0, "max": 29748.0, "count": 4581 }, "SoccerTwos.Self-play.ELO.mean": { "value": 1678.4620172765096, "min": 1212.5681852336063, "max": 1765.840257411942, "count": 4581 }, "SoccerTwos.Self-play.ELO.sum": { "value": 238341.60645326436, "min": 2426.137259852876, "max": 411467.6637855345, "count": 4581 }, "SoccerTwos.Step.mean": { "value": 49999942.0, "min": 4199998.0, "max": 49999942.0, "count": 4581 }, "SoccerTwos.Step.sum": { "value": 49999942.0, "min": 4199998.0, "max": 49999942.0, "count": 4581 }, "SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": { "value": 0.006697884760797024, "min": -0.13330614566802979, "max": 0.15213260054588318, "count": 4581 }, "SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": { "value": 0.9577975273132324, "min": -27.450756072998047, "max": 21.43090057373047, "count": 4581 }, "SoccerTwos.Policy.ExtrinsicValueEstimate.mean": { "value": 0.006579205859452486, "min": -0.13330888748168945, "max": 0.1541464775800705, "count": 4581 }, "SoccerTwos.Policy.ExtrinsicValueEstimate.sum": { "value": 0.940826416015625, "min": -27.68012237548828, "max": 21.721294403076172, "count": 4581 }, "SoccerTwos.Environment.CumulativeReward.mean": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 4581 }, "SoccerTwos.Environment.CumulativeReward.sum": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 4581 }, "SoccerTwos.Policy.ExtrinsicReward.mean": { "value": 0.01045314665440913, "min": -0.6911157902918363, "max": 0.41237746913668133, "count": 4581 }, "SoccerTwos.Policy.ExtrinsicReward.sum": { "value": 1.4947999715805054, "min": -66.42960047721863, "max": 61.84880030155182, "count": 4581 }, "SoccerTwos.Environment.GroupCumulativeReward.mean": { "value": 0.01045314665440913, "min": -0.6911157902918363, "max": 0.41237746913668133, "count": 4581 }, "SoccerTwos.Environment.GroupCumulativeReward.sum": { "value": 1.4947999715805054, "min": -66.42960047721863, "max": 61.84880030155182, "count": 4581 }, "SoccerTwos.IsTraining.mean": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 4581 }, "SoccerTwos.IsTraining.sum": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 4581 }, "SoccerTwos.Losses.PolicyLoss.mean": { "value": 0.01660262036086048, "min": 0.011109176481841131, "max": 0.02823437072414284, "count": 2223 }, "SoccerTwos.Losses.PolicyLoss.sum": { "value": 0.01660262036086048, "min": 0.011109176481841131, "max": 0.02823437072414284, "count": 2223 }, "SoccerTwos.Losses.ValueLoss.mean": { "value": 0.1093051423629125, "min": 0.002029539898891623, "max": 0.13233825986584027, "count": 2223 }, "SoccerTwos.Losses.ValueLoss.sum": { "value": 0.1093051423629125, "min": 0.002029539898891623, "max": 0.13233825986584027, "count": 2223 }, "SoccerTwos.Losses.BaselineLoss.mean": { "value": 0.11064696709314982, "min": 0.0020286145076776545, "max": 0.13497360696395239, "count": 2223 }, "SoccerTwos.Losses.BaselineLoss.sum": { "value": 0.11064696709314982, "min": 0.0020286145076776545, "max": 0.13497360696395239, "count": 2223 }, "SoccerTwos.Policy.LearningRate.mean": { "value": 0.0003, "min": 0.0003, "max": 0.0003, "count": 
2223 }, "SoccerTwos.Policy.LearningRate.sum": { "value": 0.0003, "min": 0.0003, "max": 0.0003, "count": 2223 }, "SoccerTwos.Policy.Epsilon.mean": { "value": 0.20000000000000007, "min": 0.20000000000000007, "max": 0.20000000000000007, "count": 2223 }, "SoccerTwos.Policy.Epsilon.sum": { "value": 0.20000000000000007, "min": 0.20000000000000007, "max": 0.20000000000000007, "count": 2223 }, "SoccerTwos.Policy.Beta.mean": { "value": 0.005000000000000001, "min": 0.005000000000000001, "max": 0.005000000000000001, "count": 2223 }, "SoccerTwos.Policy.Beta.sum": { "value": 0.005000000000000001, "min": 0.005000000000000001, "max": 0.005000000000000001, "count": 2223 } }, "metadata": { "timer_format_version": "0.1.0", "start_time_seconds": "1678454446", "python_version": "3.9.16 (main, Mar 1 2023, 18:30:21) [MSC v.1916 64 bit (AMD64)]", "command_line_arguments": "C:\\Users\\idsadmin\\anaconda3\\envs\\rl\\Scripts\\mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/linux/SoccerTwos/SoccerTwos.exe --run-id=SoccerTwos --no-graphics --resume", "mlagents_version": "0.31.0.dev0", "mlagents_envs_version": "0.31.0.dev0", "communication_protocol_version": "1.5.0", "pytorch_version": "1.13.1+cpu", "numpy_version": "1.21.2", "end_time_seconds": "1678581541" }, "total": 127094.7696381, "count": 1, "self": 0.251583400007803, "children": { "run_training.setup": { "total": 0.13284220000000024, "count": 1, "self": 0.13284220000000024 }, "TrainerController.start_learning": { "total": 127094.3852125, "count": 1, "self": 69.88602589456423, "children": { "TrainerController._reset_env": { "total": 8.308279500000452, "count": 230, "self": 8.308279500000452 }, "TrainerController.advance": { "total": 127016.07745260544, "count": 3175866, "self": 67.77394620975247, "children": { "env_step": { "total": 48998.369104898295, "count": 3175866, "self": 38793.24266990588, "children": { "SubprocessEnvManager._take_step": { "total": 10162.346585893176, "count": 3175866, "self": 340.4563593692965, "children": { "TorchPolicy.evaluate": { "total": 9821.89022652388, "count": 5748170, "self": 9821.89022652388 } } }, "workers": { "total": 42.779849099240174, "count": 3175866, "self": 0.0, "children": { "worker_root": { "total": 127000.02357811066, "count": 3175866, "is_parallel": true, "self": 95388.21414811551, "children": { "steps_from_proto": { "total": 0.3613851000384489, "count": 460, "is_parallel": true, "self": 0.07185589999247721, "children": { "_process_rank_one_or_two_observation": { "total": 0.2895292000459717, "count": 1840, "is_parallel": true, "self": 0.2895292000459717 } } }, "UnityEnvironment.step": { "total": 31611.448044895107, "count": 3175866, "is_parallel": true, "self": 1413.6317041753282, "children": { "UnityEnvironment._generate_step_input": { "total": 1000.4828017003056, "count": 3175866, "is_parallel": true, "self": 1000.4828017003056 }, "communicator.exchange": { "total": 24598.79401260727, "count": 3175866, "is_parallel": true, "self": 24598.79401260727 }, "steps_from_proto": { "total": 4598.5395264122035, "count": 6351732, "is_parallel": true, "self": 931.7209175904354, "children": { "_process_rank_one_or_two_observation": { "total": 3666.818608821768, "count": 25406928, "is_parallel": true, "self": 3666.818608821768 } } } } } } } } } } }, "trainer_advance": { "total": 77949.9344014974, "count": 3175866, "self": 464.8267217055982, "children": { "process_trajectory": { "total": 11435.076538491645, "count": 3175866, "self": 11423.952597291647, "children": { "RLTrainer._checkpoint": { 
"total": 11.123941199998058, "count": 92, "self": 11.123941199998058 } } }, "_update_policy": { "total": 66050.03114130016, "count": 2223, "self": 6383.154494399307, "children": { "TorchPOCAOptimizer.update": { "total": 59666.876646900855, "count": 66690, "self": 59666.876646900855 } } } } } } }, "trainer_threads": { "total": 6.999907782301307e-07, "count": 1, "self": 6.999907782301307e-07 }, "TrainerController._save_models": { "total": 0.11345380000420846, "count": 1, "self": 0.0025932999997166917, "children": { "RLTrainer._checkpoint": { "total": 0.11086050000449177, "count": 1, "self": 0.11086050000449177 } } } } } } }