{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 2.897310256958008,
"min": 2.848477363586426,
"max": 3.295743465423584,
"count": 500
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 40701.4140625,
"min": 13514.955078125,
"max": 119905.03125,
"count": 500
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 999.0,
"min": 449.1,
"max": 999.0,
"count": 500
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19980.0,
"min": 10852.0,
"max": 29248.0,
"count": 500
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1202.0664214104136,
"min": 1197.0971271610715,
"max": 1214.2053557656131,
"count": 240
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 2404.132842820827,
"min": 2394.194254322143,
"max": 16819.672321847218,
"count": 240
},
"SoccerTwos.Step.mean": {
"value": 4999266.0,
"min": 9356.0,
"max": 4999266.0,
"count": 500
},
"SoccerTwos.Step.sum": {
"value": 4999266.0,
"min": 9356.0,
"max": 4999266.0,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": 0.00028921442572027445,
"min": -0.021255120635032654,
"max": 0.01638713665306568,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": 0.0028921442572027445,
"min": -0.22371436655521393,
"max": 0.16416141390800476,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.00027330080047249794,
"min": -0.026361608877778053,
"max": 0.01724361814558506,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": 0.0027330080047249794,
"min": -0.26361608505249023,
"max": 0.1724361777305603,
"count": 500
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 500
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.0,
"min": -0.6363636363636364,
"max": 0.28514666159947716,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 0.0,
"min": -8.0,
"max": 4.277199923992157,
"count": 500
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.0,
"min": -0.6363636363636364,
"max": 0.28514666159947716,
"count": 500
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 0.0,
"min": -8.0,
"max": 4.277199923992157,
"count": 500
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 500
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 500
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.012303747368666034,
"min": 0.01181834559198857,
"max": 0.02552387221949175,
"count": 231
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.012303747368666034,
"min": 0.01181834559198857,
"max": 0.02552387221949175,
"count": 231
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 8.162543826036502e-08,
"min": 3.870426934504678e-09,
"max": 0.005837825002769629,
"count": 231
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 8.162543826036502e-08,
"min": 3.870426934504678e-09,
"max": 0.005837825002769629,
"count": 231
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 7.314902236525238e-08,
"min": 3.573569388552282e-09,
"max": 0.005854411035155257,
"count": 231
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 7.314902236525238e-08,
"min": 3.573569388552282e-09,
"max": 0.005854411035155257,
"count": 231
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 231
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 231
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000004,
"max": 0.20000000000000007,
"count": 231
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000004,
"max": 0.20000000000000007,
"count": 231
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 231
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 231
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1686084129",
"python_version": "3.9.16 (main, Mar 8 2023, 04:29:44) \n[Clang 14.0.6 ]",
"command_line_arguments": "/Users/pete/miniconda3/envs/rl/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos.exe --run-id=SoccerTwos --no-graphics --force",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0",
"numpy_version": "1.21.2",
"end_time_seconds": "1686106843"
},
"total": 22712.8804085,
"count": 1,
"self": 2.4257677500027057,
"children": {
"run_training.setup": {
"total": 0.033340499999999995,
"count": 1,
"self": 0.033340499999999995
},
"TrainerController.start_learning": {
"total": 22710.42130025,
"count": 1,
"self": 4.830903147627396,
"children": {
"TrainerController._reset_env": {
"total": 5.326529873997477,
"count": 25,
"self": 5.326529873997477
},
"TrainerController.advance": {
"total": 22700.152816895377,
"count": 324961,
"self": 4.345810035294562,
"children": {
"env_step": {
"total": 4773.947685887066,
"count": 324961,
"self": 3821.429469863415,
"children": {
"SubprocessEnvManager._take_step": {
"total": 949.2356304140326,
"count": 324961,
"self": 25.590473819653198,
"children": {
"TorchPolicy.evaluate": {
"total": 923.6451565943794,
"count": 645372,
"self": 923.6451565943794
}
}
},
"workers": {
"total": 3.2825856096184918,
"count": 324961,
"self": 0.0,
"children": {
"worker_root": {
"total": 22698.56535456274,
"count": 324961,
"is_parallel": true,
"self": 19473.169866872755,
"children": {
"steps_from_proto": {
"total": 0.03979620699360531,
"count": 50,
"is_parallel": true,
"self": 0.007292419975294173,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.03250378701831114,
"count": 200,
"is_parallel": true,
"self": 0.03250378701831114
}
}
},
"UnityEnvironment.step": {
"total": 3225.3556914829906,
"count": 324961,
"is_parallel": true,
"self": 142.27648066783104,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 96.10931491563395,
"count": 324961,
"is_parallel": true,
"self": 96.10931491563395
},
"communicator.exchange": {
"total": 2528.892965574136,
"count": 324961,
"is_parallel": true,
"self": 2528.892965574136
},
"steps_from_proto": {
"total": 458.0769303253895,
"count": 649922,
"is_parallel": true,
"self": 74.26221355696799,
"children": {
"_process_rank_one_or_two_observation": {
"total": 383.81471676842153,
"count": 2599688,
"is_parallel": true,
"self": 383.81471676842153
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 17921.859320973017,
"count": 324961,
"self": 59.730450604132784,
"children": {
"process_trajectory": {
"total": 1957.5087623638713,
"count": 324961,
"self": 1954.081727070871,
"children": {
"RLTrainer._checkpoint": {
"total": 3.4270352930002446,
"count": 10,
"self": 3.4270352930002446
}
}
},
"_update_policy": {
"total": 15904.620108005014,
"count": 231,
"self": 683.2543208339684,
"children": {
"TorchPOCAOptimizer.update": {
"total": 15221.365787171046,
"count": 6936,
"self": 15221.365787171046
}
}
}
}
}
}
},
"trainer_threads": {
"total": 6.249974831007421e-07,
"count": 1,
"self": 6.249974831007421e-07
},
"TrainerController._save_models": {
"total": 0.11104970800079172,
"count": 1,
"self": 0.003987708001659485,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10706199999913224,
"count": 1,
"self": 0.10706199999913224
}
}
}
}
}
}
}