poca-SoccerTwos / run_logs / timers.json
{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 3.2150719165802,
"min": 3.170013427734375,
"max": 3.2957265377044678,
"count": 88
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 42490.390625,
"min": 16325.6259765625,
"max": 105463.203125,
"count": 88
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 999.0,
"min": 495.2,
"max": 999.0,
"count": 88
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19980.0,
"min": 15380.0,
"max": 27320.0,
"count": 88
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1200.0979941689066,
"min": 1193.0671509243548,
"max": 1204.4035210473291,
"count": 63
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 2400.195988337813,
"min": 2389.9140730166932,
"max": 19188.856449368337,
"count": 63
},
"SoccerTwos.Step.mean": {
"value": 879414.0,
"min": 9676.0,
"max": 879414.0,
"count": 88
},
"SoccerTwos.Step.sum": {
"value": 879414.0,
"min": 9676.0,
"max": 879414.0,
"count": 88
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.0022735975217074156,
"min": -0.10676523298025131,
"max": -0.0012080572778359056,
"count": 88
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -0.022735975682735443,
"min": -1.494713306427002,
"max": -0.018120858818292618,
"count": 88
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.0028971494175493717,
"min": -0.10677098482847214,
"max": -0.00031215374474413693,
"count": 88
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -0.028971493244171143,
"min": -1.4947937726974487,
"max": -0.004682306200265884,
"count": 88
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 88
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 88
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.0,
"min": -0.4550599992275238,
"max": 0.2824857064655849,
"count": 88
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 0.0,
"min": -9.101199984550476,
"max": 3.9547998905181885,
"count": 88
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.0,
"min": -0.4550599992275238,
"max": 0.2824857064655849,
"count": 88
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 0.0,
"min": -9.101199984550476,
"max": 3.9547998905181885,
"count": 88
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 88
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 88
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.012585476921716084,
"min": 0.011764387342069919,
"max": 0.021005486588304242,
"count": 40
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.012585476921716084,
"min": 0.011764387342069919,
"max": 0.021005486588304242,
"count": 40
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.0008365886198589579,
"min": 1.2928697022592435e-06,
"max": 0.0063222938605273764,
"count": 40
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.0008365886198589579,
"min": 1.2928697022592435e-06,
"max": 0.0063222938605273764,
"count": 40
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.000837781477215079,
"min": 1.555102905588986e-06,
"max": 0.006354990066029131,
"count": 40
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.000837781477215079,
"min": 1.555102905588986e-06,
"max": 0.006354990066029131,
"count": 40
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 40
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 40
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 40
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 40
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 40
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1701429137",
"python_version": "3.10.12 | packaged by conda-forge | (main, Jun 23 2023, 22:34:57) [MSC v.1936 64 bit (AMD64)]",
"command_line_arguments": "\\\\?\\C:\\Anaconda3\\envs\\rl\\Scripts\\mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos.exe --run-id=SoccerTwos --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.1+cpu",
"numpy_version": "1.23.5",
"end_time_seconds": "1701430801"
},
"total": 1664.0088754000608,
"count": 1,
"self": 0.09524330007843673,
"children": {
"run_training.setup": {
"total": 0.136288300040178,
"count": 1,
"self": 0.136288300040178
},
"TrainerController.start_learning": {
"total": 1663.7773437999422,
"count": 1,
"self": 1.1845473876455799,
"children": {
"TrainerController._reset_env": {
"total": 3.6966400999808684,
"count": 5,
"self": 3.6966400999808684
},
"TrainerController.advance": {
"total": 1658.7602074122988,
"count": 57842,
"self": 1.203146017040126,
"children": {
"env_step": {
"total": 869.9707423954969,
"count": 57842,
"self": 662.7599132098258,
"children": {
"SubprocessEnvManager._take_step": {
"total": 206.47988678875845,
"count": 57842,
"self": 6.4427419940475374,
"children": {
"TorchPolicy.evaluate": {
"total": 200.0371447947109,
"count": 114854,
"self": 200.0371447947109
}
}
},
"workers": {
"total": 0.7309423969127238,
"count": 57842,
"self": 0.0,
"children": {
"worker_root": {
"total": 1655.4266972908517,
"count": 57842,
"is_parallel": true,
"self": 1137.4915873962454,
"children": {
"steps_from_proto": {
"total": 0.0071546999970451,
"count": 10,
"is_parallel": true,
"self": 0.0015663999365642667,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.005588300060480833,
"count": 40,
"is_parallel": true,
"self": 0.005588300060480833
}
}
},
"UnityEnvironment.step": {
"total": 517.9279551946092,
"count": 57842,
"is_parallel": true,
"self": 25.801930344779976,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 20.350491013145074,
"count": 57842,
"is_parallel": true,
"self": 20.350491013145074
},
"communicator.exchange": {
"total": 388.40555692638736,
"count": 57842,
"is_parallel": true,
"self": 388.40555692638736
},
"steps_from_proto": {
"total": 83.36997691029683,
"count": 115684,
"is_parallel": true,
"self": 17.866936698090285,
"children": {
"_process_rank_one_or_two_observation": {
"total": 65.50304021220654,
"count": 462736,
"is_parallel": true,
"self": 65.50304021220654
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 787.5863189997617,
"count": 57842,
"self": 8.574956009048037,
"children": {
"process_trajectory": {
"total": 116.95183569041546,
"count": 57842,
"self": 116.79685379052535,
"children": {
"RLTrainer._checkpoint": {
"total": 0.15498189989011735,
"count": 1,
"self": 0.15498189989011735
}
}
},
"_update_policy": {
"total": 662.0595273002982,
"count": 41,
"self": 108.21700470196083,
"children": {
"TorchPOCAOptimizer.update": {
"total": 553.8425225983374,
"count": 1206,
"self": 553.8425225983374
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.100008375942707e-06,
"count": 1,
"self": 1.100008375942707e-06
},
"TrainerController._save_models": {
"total": 0.1359478000085801,
"count": 1,
"self": 0.006517199915833771,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12943060009274632,
"count": 1,
"self": 0.12943060009274632
}
}
}
}
}
}
}
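A minimal sketch of how the gauge and timer data above could be inspected, assuming the file is saved locally at run_logs/timers.json as the path at the top suggests; it uses only the Python standard library and the keys that actually appear in this file ("gauges", "total", "metadata"):

import json

# Load the ML-Agents timer/gauge dump shown above.
with open("run_logs/timers.json", "r", encoding="utf-8") as f:
    timers = json.load(f)

# Each gauge records a current value plus running min/max and a sample count.
for name, gauge in timers["gauges"].items():
    print(f"{name}: value={gauge['value']:.4f} "
          f"(min={gauge['min']:.4f}, max={gauge['max']:.4f}, n={gauge['count']})")

# Top-level wall-clock total for the run and a couple of metadata fields.
print("total seconds:", timers["total"])
print("mlagents version:", timers["metadata"]["mlagents_version"])

The same dictionary walk can be extended into the nested "children" blocks if per-phase timings (env_step, trainer_advance, and so on) are needed.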