{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 3.164069652557373,
"min": 3.164069652557373,
"max": 3.2957675457000732,
"count": 100
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 95276.46875,
"min": 12234.484375,
"max": 138686.203125,
"count": 100
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 807.7142857142857,
"min": 456.3,
"max": 999.0,
"count": 100
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 22616.0,
"min": 9936.0,
"max": 30560.0,
"count": 100
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1209.8359047896772,
"min": 1190.9718619790642,
"max": 1209.8359047896772,
"count": 92
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 4839.343619158709,
"min": 2384.5296435698465,
"max": 14449.75863327991,
"count": 92
},
"SoccerTwos.Step.mean": {
"value": 999394.0,
"min": 9296.0,
"max": 999394.0,
"count": 100
},
"SoccerTwos.Step.sum": {
"value": 999394.0,
"min": 9296.0,
"max": 999394.0,
"count": 100
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": 0.006452392321079969,
"min": -0.07377874106168747,
"max": 0.01881234161555767,
"count": 100
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": 0.08388110250234604,
"min": -1.0875669717788696,
"max": 0.24456045031547546,
"count": 100
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.00683071743696928,
"min": -0.07252873480319977,
"max": 0.01654862053692341,
"count": 100
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": 0.08879932761192322,
"min": -1.0878794193267822,
"max": 0.21513205766677856,
"count": 100
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 100
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 100
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.255707690348992,
"min": -0.5333333333333333,
"max": 0.2882285714149475,
"count": 100
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 3.3241999745368958,
"min": -8.0,
"max": 4.035199999809265,
"count": 100
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.255707690348992,
"min": -0.5333333333333333,
"max": 0.2882285714149475,
"count": 100
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 3.3241999745368958,
"min": -8.0,
"max": 4.035199999809265,
"count": 100
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 100
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 100
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.01862032460824897,
"min": 0.01137493228064462,
"max": 0.022747018044659246,
"count": 46
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.01862032460824897,
"min": 0.01137493228064462,
"max": 0.022747018044659246,
"count": 46
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.0035500805048892894,
"min": 3.304707370261894e-05,
"max": 0.0062710876576602455,
"count": 46
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.0035500805048892894,
"min": 3.304707370261894e-05,
"max": 0.0062710876576602455,
"count": 46
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.003666390172050645,
"min": 3.529365700766599e-05,
"max": 0.006302091960484783,
"count": 46
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.003666390172050645,
"min": 3.529365700766599e-05,
"max": 0.006302091960484783,
"count": 46
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 46
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 46
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 46
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 46
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 46
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 46
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1699522137",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/linux/SoccerTwos --run-id=SoccerTwos --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.0+cu118",
"numpy_version": "1.23.5",
"end_time_seconds": "1699524675"
},
"total": 2538.3740225720003,
"count": 1,
"self": 0.501028629999837,
"children": {
"run_training.setup": {
"total": 0.04866530800001101,
"count": 1,
"self": 0.04866530800001101
},
"TrainerController.start_learning": {
"total": 2537.8243286340003,
"count": 1,
"self": 1.9233806639435898,
"children": {
"TrainerController._reset_env": {
"total": 10.658171014000231,
"count": 5,
"self": 10.658171014000231
},
"TrainerController.advance": {
"total": 2525.0188047640563,
"count": 65591,
"self": 2.1432619409993094,
"children": {
"env_step": {
"total": 2111.225529583983,
"count": 65591,
"self": 1626.561518734965,
"children": {
"SubprocessEnvManager._take_step": {
"total": 483.4926342370325,
"count": 65591,
"self": 13.021397860001684,
"children": {
"TorchPolicy.evaluate": {
"total": 470.47123637703083,
"count": 130178,
"self": 470.47123637703083
}
}
},
"workers": {
"total": 1.1713766119857496,
"count": 65591,
"self": 0.0,
"children": {
"worker_root": {
"total": 2529.328252156988,
"count": 65591,
"is_parallel": true,
"self": 1168.4915147969782,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.00958781200000658,
"count": 2,
"is_parallel": true,
"self": 0.007100627000056647,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002487184999949932,
"count": 8,
"is_parallel": true,
"self": 0.002487184999949932
}
}
},
"UnityEnvironment.step": {
"total": 0.04253263200001811,
"count": 1,
"is_parallel": true,
"self": 0.0011928259999649526,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0009418900000355279,
"count": 1,
"is_parallel": true,
"self": 0.0009418900000355279
},
"communicator.exchange": {
"total": 0.03651335099999642,
"count": 1,
"is_parallel": true,
"self": 0.03651335099999642
},
"steps_from_proto": {
"total": 0.0038845650000212117,
"count": 2,
"is_parallel": true,
"self": 0.0006601549999913914,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0032244100000298204,
"count": 8,
"is_parallel": true,
"self": 0.0032244100000298204
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1360.8268681340103,
"count": 65590,
"is_parallel": true,
"self": 87.23338863910044,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 59.56226188698008,
"count": 65590,
"is_parallel": true,
"self": 59.56226188698008
},
"communicator.exchange": {
"total": 938.2080999589514,
"count": 65590,
"is_parallel": true,
"self": 938.2080999589514
},
"steps_from_proto": {
"total": 275.82311764897827,
"count": 131180,
"is_parallel": true,
"self": 45.18851703595652,
"children": {
"_process_rank_one_or_two_observation": {
"total": 230.63460061302175,
"count": 524720,
"is_parallel": true,
"self": 230.63460061302175
}
}
}
}
},
"steps_from_proto": {
"total": 0.009869225999750597,
"count": 8,
"is_parallel": true,
"self": 0.0021275120001291725,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.007741713999621425,
"count": 32,
"is_parallel": true,
"self": 0.007741713999621425
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 411.65001323907364,
"count": 65591,
"self": 15.25448524208224,
"children": {
"process_trajectory": {
"total": 116.3700428379903,
"count": 65591,
"self": 115.91781805399006,
"children": {
"RLTrainer._checkpoint": {
"total": 0.45222478400023647,
"count": 2,
"self": 0.45222478400023647
}
}
},
"_update_policy": {
"total": 280.0254851590011,
"count": 46,
"self": 170.90006789300924,
"children": {
"TorchPOCAOptimizer.update": {
"total": 109.12541726599187,
"count": 1380,
"self": 109.12541726599187
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.1259999155299738e-06,
"count": 1,
"self": 1.1259999155299738e-06
},
"TrainerController._save_models": {
"total": 0.22397106600010375,
"count": 1,
"self": 0.0027897489999304526,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2211813170001733,
"count": 1,
"self": 0.2211813170001733
}
}
}
}
}
}
}