{
  "best_metric": null,
  "best_model_checkpoint": null,
  "best_supernet_model_checkpoint": null,
  "epoch": 5.0,
  "global_step": 41175,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "compression_loss": 0.0,
      "epoch": 0.06,
      "learning_rate": 2.998921662946385e-05,
      "loss": 5.928,
      "step": 500
    },
    {
      "compression_loss": 0.0,
      "epoch": 0.12,
      "learning_rate": 2.9956708418044054e-05,
      "loss": 3.6128,
      "step": 1000
    },
    {
      "compression_loss": 0.0,
      "epoch": 0.18,
      "learning_rate": 2.990265252786665e-05,
      "loss": 3.0946,
      "step": 1500
    },
    {
      "compression_loss": 0.0,
      "epoch": 0.24,
      "learning_rate": 2.982673683458095e-05,
      "loss": 2.836,
      "step": 2000
    },
    {
      "compression_loss": 0.0,
      "epoch": 0.3,
      "learning_rate": 2.972924541394191e-05,
      "loss": 2.6325,
      "step": 2500
    },
    {
      "compression_loss": 0.0,
      "epoch": 0.36,
      "learning_rate": 2.9610320134543718e-05,
      "loss": 2.5171,
      "step": 3000
    },
    {
      "compression_loss": 0.0,
      "epoch": 0.43,
      "learning_rate": 2.9470134055329297e-05,
      "loss": 2.3989,
      "step": 3500
    },
    {
      "compression_loss": 0.0,
      "epoch": 0.49,
      "learning_rate": 2.9308891173756593e-05,
      "loss": 2.2948,
      "step": 4000
    },
    {
      "compression_loss": 0.0,
      "epoch": 0.55,
      "learning_rate": 2.9126826128943387e-05,
      "loss": 2.1684,
      "step": 4500
    },
    {
      "compression_loss": 0.0,
      "epoch": 0.61,
      "learning_rate": 2.892420386022268e-05,
      "loss": 2.1619,
      "step": 5000
    },
    {
      "compression_loss": 0.0,
      "epoch": 0.67,
      "learning_rate": 2.8701319221605467e-05,
      "loss": 2.0291,
      "step": 5500
    },
    {
      "compression_loss": 0.0,
      "epoch": 0.73,
      "learning_rate": 2.8458496552711964e-05,
      "loss": 1.9988,
      "step": 6000
    },
    {
      "compression_loss": 0.0,
      "epoch": 0.79,
      "learning_rate": 2.819608920679567e-05,
      "loss": 1.9522,
      "step": 6500
    },
    {
      "compression_loss": 0.0,
      "epoch": 0.85,
      "learning_rate": 2.7914479036547047e-05,
      "loss": 1.9296,
      "step": 7000
    },
    {
      "compression_loss": 0.0,
      "epoch": 0.91,
      "learning_rate": 2.7614075838425082e-05,
      "loss": 1.8511,
      "step": 7500
    },
    {
      "compression_loss": 0.0,
      "epoch": 0.97,
      "learning_rate": 2.7295316756325375e-05,
      "loss": 1.8184,
      "step": 8000
    },
    {
      "Minimum SubNet": "OrderedDict([(<ElasticityDim.WIDTH: 'width'>, {0: 384, 1: 192, 2: 384, 3: 448, 4: 448, 5: 384, 6: 448, 7: 448, 8: 610, 9: 544, 10: 631, 11: 653, 12: 591, 13: 549, 14: 508, 15: 485})])",
      "epoch": 1.0,
      "eval_HasAns_exact": 62.90485829959514,
      "eval_HasAns_f1": 69.4564460879015,
      "eval_HasAns_total": 5928,
      "eval_NoAns_exact": 68.35996635828427,
      "eval_NoAns_f1": 68.35996635828427,
      "eval_NoAns_total": 5945,
      "eval_best_exact": 65.64474016676493,
      "eval_best_exact_thresh": 0.0,
      "eval_best_f1": 68.91584371339046,
      "eval_best_f1_thresh": 0.0,
      "eval_exact": 65.6363176956119,
      "eval_f1": 68.90742124223719,
      "eval_runtime": 19.8249,
      "eval_samples_per_second": 612.057,
      "eval_steps_per_second": 4.792,
      "eval_total": 11873,
      "step": 8235
    },
    {
      "SuperNet": "OrderedDict([(<ElasticityDim.WIDTH: 'width'>, {0: 512, 1: 512, 2: 512, 3: 512, 4: 512, 5: 512, 6: 512, 7: 512, 8: 2048, 9: 2048, 10: 2048, 11: 2048, 12: 2048, 13: 2048, 14: 2048, 15: 2048})])",
      "epoch": 1.0,
      "eval_HasAns_exact": 67.6450742240216,
      "eval_HasAns_f1": 73.95858758792157,
      "eval_HasAns_total": 5928,
      "eval_NoAns_exact": 68.29268292682927,
      "eval_NoAns_f1": 68.29268292682927,
      "eval_NoAns_total": 5945,
      "eval_best_exact": 67.97776467615599,
      "eval_best_exact_thresh": 0.0,
      "eval_best_f1": 71.13000145045065,
      "eval_best_f1_thresh": 0.0,
      "eval_exact": 67.96934220500295,
      "eval_f1": 71.12157897929758,
      "eval_runtime": 23.0026,
      "eval_samples_per_second": 527.506,
      "eval_steps_per_second": 4.13,
      "eval_total": 11873,
      "step": 8235
    },
    {
      "compression_loss": 0.0,
      "epoch": 1.03,
      "learning_rate": 2.6959356482568783e-05,
      "loss": 1.5854,
      "step": 8500
    },
    {
      "compression_loss": 0.0,
      "epoch": 1.09,
      "learning_rate": 2.6605337532847057e-05,
      "loss": 1.4178,
      "step": 9000
    },
    {
      "compression_loss": 0.0,
      "epoch": 1.15,
      "learning_rate": 2.6234430605598938e-05,
      "loss": 1.4158,
      "step": 9500
    },
    {
      "compression_loss": 0.0,
      "epoch": 1.21,
      "learning_rate": 2.5847175441080488e-05,
      "loss": 1.3996,
      "step": 10000
    },
    {
      "compression_loss": 0.0,
      "epoch": 1.28,
      "learning_rate": 2.54441355693482e-05,
      "loss": 1.3329,
      "step": 10500
    },
    {
      "compression_loss": 0.0,
      "epoch": 1.34,
      "learning_rate": 2.502589749021534e-05,
      "loss": 1.3709,
      "step": 11000
    },
    {
      "compression_loss": 0.0,
      "epoch": 1.4,
      "learning_rate": 2.459394962034141e-05,
      "loss": 1.3598,
      "step": 11500
    },
    {
      "compression_loss": 0.0,
      "epoch": 1.46,
      "learning_rate": 2.4148096507143148e-05,
      "loss": 1.3531,
      "step": 12000
    },
    {
      "compression_loss": 0.0,
      "epoch": 1.52,
      "learning_rate": 2.3688051425634673e-05,
      "loss": 1.3325,
      "step": 12500
    },
    {
      "compression_loss": 0.0,
      "epoch": 1.58,
      "learning_rate": 2.321536357377945e-05,
      "loss": 1.3222,
      "step": 13000
    },
    {
      "compression_loss": 0.0,
      "epoch": 1.64,
      "learning_rate": 2.273072080247337e-05,
      "loss": 1.3055,
      "step": 13500
    },
    {
      "compression_loss": 0.0,
      "epoch": 1.7,
      "learning_rate": 2.2234828359298165e-05,
      "loss": 1.2822,
      "step": 14000
    },
    {
      "compression_loss": 0.0,
      "epoch": 1.76,
      "learning_rate": 2.172943072286878e-05,
      "loss": 1.2867,
      "step": 14500
    },
    {
      "compression_loss": 0.0,
      "epoch": 1.82,
      "learning_rate": 2.1213237947389485e-05,
      "loss": 1.2798,
      "step": 15000
    },
    {
      "compression_loss": 0.0,
      "epoch": 1.88,
      "learning_rate": 2.0688003726754053e-05,
      "loss": 1.2475,
      "step": 15500
    },
    {
      "compression_loss": 0.0,
      "epoch": 1.94,
      "learning_rate": 2.0154492376837755e-05,
      "loss": 1.2647,
      "step": 16000
    },
    {
      "Minimum SubNet": "OrderedDict([(<ElasticityDim.WIDTH: 'width'>, {0: 384, 1: 192, 2: 384, 3: 448, 4: 448, 5: 384, 6: 448, 7: 448, 8: 610, 9: 544, 10: 631, 11: 653, 12: 591, 13: 549, 14: 508, 15: 485})])",
      "epoch": 2.0,
      "eval_HasAns_exact": 63.917004048582996,
      "eval_HasAns_f1": 70.22830220550237,
      "eval_HasAns_total": 5928,
      "eval_NoAns_exact": 72.0605550883095,
      "eval_NoAns_f1": 72.0605550883095,
      "eval_NoAns_total": 5945,
      "eval_best_exact": 67.99460961846205,
      "eval_best_exact_thresh": 0.0,
      "eval_best_f1": 71.14574037515544,
      "eval_best_f1_thresh": 0.0,
      "eval_exact": 67.99460961846205,
      "eval_f1": 71.14574037515541,
      "eval_runtime": 17.7943,
      "eval_samples_per_second": 681.904,
      "eval_steps_per_second": 5.339,
      "eval_total": 11873,
      "step": 16470
    },
    {
      "SuperNet": "OrderedDict([(<ElasticityDim.WIDTH: 'width'>, {0: 512, 1: 512, 2: 512, 3: 512, 4: 512, 5: 512, 6: 512, 7: 512, 8: 2048, 9: 2048, 10: 2048, 11: 2048, 12: 2048, 13: 2048, 14: 2048, 15: 2048})])",
      "epoch": 2.0,
      "eval_HasAns_exact": 68.60661268556005,
      "eval_HasAns_f1": 74.58138647737387,
      "eval_HasAns_total": 5928,
      "eval_NoAns_exact": 71.0681244743482,
      "eval_NoAns_f1": 71.0681244743482,
      "eval_NoAns_total": 5945,
      "eval_best_exact": 69.839130800977,
      "eval_best_exact_thresh": 0.0,
      "eval_best_f1": 72.82224029629188,
      "eval_best_f1_thresh": 0.0,
      "eval_exact": 69.839130800977,
      "eval_f1": 72.82224029629188,
      "eval_runtime": 18.7704,
      "eval_samples_per_second": 646.444,
      "eval_steps_per_second": 5.061,
      "eval_total": 11873,
      "step": 16470
    },
    {
      "compression_loss": 0.0,
      "epoch": 2.0,
      "learning_rate": 1.961674714301095e-05,
      "loss": 1.2283,
      "step": 16500
    },
    {
      "compression_loss": 0.0,
      "epoch": 2.06,
      "learning_rate": 1.9069059444969563e-05,
      "loss": 0.9251,
      "step": 17000
    },
    {
      "compression_loss": 0.0,
      "epoch": 2.13,
      "learning_rate": 1.8515450490069793e-05,
      "loss": 0.913,
      "step": 17500
    },
    {
      "compression_loss": 0.0,
      "epoch": 2.19,
      "learning_rate": 1.7956725884832123e-05,
      "loss": 0.9281,
      "step": 18000
    },
    {
      "compression_loss": 0.0,
      "epoch": 2.25,
      "learning_rate": 1.7393698680023048e-05,
      "loss": 0.9135,
      "step": 18500
    },
    {
      "compression_loss": 0.0,
      "epoch": 2.31,
      "learning_rate": 1.682718818751133e-05,
      "loss": 0.9216,
      "step": 19000
    },
    {
      "compression_loss": 0.0,
      "epoch": 2.37,
      "learning_rate": 1.6258018788013082e-05,
      "loss": 0.9112,
      "step": 19500
    },
    {
      "compression_loss": 0.0,
      "epoch": 2.43,
      "learning_rate": 1.5687018731460723e-05,
      "loss": 0.8918,
      "step": 20000
    },
    {
      "compression_loss": 0.0,
      "epoch": 2.49,
      "learning_rate": 1.5116163375957171e-05,
      "loss": 0.8542,
      "step": 20500
    },
    {
      "compression_loss": 0.0,
      "epoch": 2.55,
      "learning_rate": 1.4543995705456567e-05,
      "loss": 0.8708,
      "step": 21000
    },
    {
      "compression_loss": 0.0,
      "epoch": 2.61,
      "learning_rate": 1.3972491608096893e-05,
      "loss": 0.8999,
      "step": 21500
    },
    {
      "compression_loss": 0.0,
      "epoch": 2.67,
      "learning_rate": 1.3402482731240843e-05,
      "loss": 0.8712,
      "step": 22000
    },
    {
      "compression_loss": 0.0,
      "epoch": 2.73,
      "learning_rate": 1.2834798546420376e-05,
      "loss": 0.8533,
      "step": 22500
    },
    {
      "compression_loss": 0.0,
      "epoch": 2.79,
      "learning_rate": 1.2272515909019886e-05,
      "loss": 0.9089,
      "step": 23000
    },
    {
      "compression_loss": 0.0,
      "epoch": 2.85,
      "learning_rate": 1.1711937270962604e-05,
      "loss": 0.8488,
      "step": 23500
    },
    {
      "compression_loss": 0.0,
      "epoch": 2.91,
      "learning_rate": 1.1156143390721824e-05,
      "loss": 0.8619,
      "step": 24000
    },
    {
      "compression_loss": 0.0,
      "epoch": 2.98,
      "learning_rate": 1.0605943054300711e-05,
      "loss": 0.8615,
      "step": 24500
    },
    {
      "Minimum SubNet": "OrderedDict([(<ElasticityDim.WIDTH: 'width'>, {0: 384, 1: 192, 2: 384, 3: 448, 4: 448, 5: 384, 6: 448, 7: 448, 8: 610, 9: 544, 10: 631, 11: 653, 12: 591, 13: 549, 14: 508, 15: 485})])",
      "epoch": 3.0,
      "eval_HasAns_exact": 67.03778677462888,
      "eval_HasAns_f1": 73.62919057241987,
      "eval_HasAns_total": 5928,
      "eval_NoAns_exact": 68.86459209419681,
      "eval_NoAns_f1": 68.86459209419681,
      "eval_NoAns_total": 5945,
      "eval_best_exact": 67.95249726269688,
      "eval_best_exact_thresh": 0.0,
      "eval_best_f1": 71.24348030938324,
      "eval_best_f1_thresh": 0.0,
      "eval_exact": 67.95249726269688,
      "eval_f1": 71.2434803093832,
      "eval_runtime": 20.6828,
      "eval_samples_per_second": 586.672,
      "eval_steps_per_second": 4.593,
      "eval_total": 11873,
      "step": 24705
    },
    {
      "SuperNet": "OrderedDict([(<ElasticityDim.WIDTH: 'width'>, {0: 512, 1: 512, 2: 512, 3: 512, 4: 512, 5: 512, 6: 512, 7: 512, 8: 2048, 9: 2048, 10: 2048, 11: 2048, 12: 2048, 13: 2048, 14: 2048, 15: 2048})])",
      "epoch": 3.0,
      "eval_HasAns_exact": 70.20917678812415,
      "eval_HasAns_f1": 76.31500557470775,
      "eval_HasAns_total": 5928,
      "eval_NoAns_exact": 70.51303616484441,
      "eval_NoAns_f1": 70.51303616484441,
      "eval_NoAns_total": 5945,
      "eval_best_exact": 70.36132401246526,
      "eval_best_exact_thresh": 0.0,
      "eval_best_f1": 73.40986718157744,
      "eval_best_f1_thresh": 0.0,
      "eval_exact": 70.36132401246526,
      "eval_f1": 73.40986718157748,
      "eval_runtime": 24.0079,
      "eval_samples_per_second": 505.418,
      "eval_steps_per_second": 3.957,
      "eval_total": 11873,
      "step": 24705
    },
    {
      "compression_loss": 0.0,
      "epoch": 3.04,
      "learning_rate": 1.0063217611277036e-05,
      "loss": 0.7353,
      "step": 25000
    },
    {
      "compression_loss": 0.0,
      "epoch": 3.1,
      "learning_rate": 9.526581842660798e-06,
      "loss": 0.6453,
      "step": 25500
    },
    {
      "compression_loss": 0.0,
      "epoch": 3.16,
      "learning_rate": 8.997910940432307e-06,
      "loss": 0.6729,
      "step": 26000
    },
    {
      "compression_loss": 0.0,
      "epoch": 3.22,
      "learning_rate": 8.477974221493395e-06,
      "loss": 0.6703,
      "step": 26500
    },
    {
      "compression_loss": 0.0,
      "epoch": 3.28,
      "learning_rate": 7.967528292844926e-06,
      "loss": 0.6633,
      "step": 27000
    },
    {
      "compression_loss": 0.0,
      "epoch": 3.34,
      "learning_rate": 7.467315950579387e-06,
      "loss": 0.6608,
      "step": 27500
    },
    {
      "compression_loss": 0.0,
      "epoch": 3.4,
      "learning_rate": 6.978065098971048e-06,
      "loss": 0.6479,
      "step": 28000
    },
    {
      "compression_loss": 0.0,
      "epoch": 3.46,
      "learning_rate": 6.500487691236567e-06,
      "loss": 0.6683,
      "step": 28500
    },
    {
      "compression_loss": 0.0,
      "epoch": 3.52,
      "learning_rate": 6.036196314774858e-06,
      "loss": 0.6597,
      "step": 29000
    },
    {
      "compression_loss": 0.0,
      "epoch": 3.58,
      "learning_rate": 5.584005940876061e-06,
      "loss": 0.6739,
      "step": 29500
    },
    {
      "compression_loss": 0.0,
      "epoch": 3.64,
      "learning_rate": 5.145517632566403e-06,
      "loss": 0.6578,
      "step": 30000
    },
    {
      "compression_loss": 0.0,
      "epoch": 3.7,
      "learning_rate": 4.721369473889145e-06,
      "loss": 0.6535,
      "step": 30500
    },
    {
      "compression_loss": 0.0,
      "epoch": 3.76,
      "learning_rate": 4.3129817358827764e-06,
      "loss": 0.6433,
      "step": 31000
    },
    {
      "compression_loss": 0.0,
      "epoch": 3.83,
      "learning_rate": 3.919312074840663e-06,
      "loss": 0.6612,
      "step": 31500
    },
    {
      "compression_loss": 0.0,
      "epoch": 3.89,
      "learning_rate": 3.541766926172923e-06,
      "loss": 0.6668,
      "step": 32000
    },
    {
      "compression_loss": 0.0,
      "epoch": 3.95,
      "learning_rate": 3.18089569001271e-06,
      "loss": 0.6456,
      "step": 32500
    },
    {
      "Minimum SubNet": "OrderedDict([(<ElasticityDim.WIDTH: 'width'>, {0: 384, 1: 192, 2: 384, 3: 448, 4: 448, 5: 384, 6: 448, 7: 448, 8: 610, 9: 544, 10: 631, 11: 653, 12: 591, 13: 549, 14: 508, 15: 485})])",
      "epoch": 4.0,
      "eval_HasAns_exact": 68.69095816464238,
      "eval_HasAns_f1": 75.3675933308443,
      "eval_HasAns_total": 5928,
      "eval_NoAns_exact": 67.2834314550042,
      "eval_NoAns_f1": 67.2834314550042,
      "eval_NoAns_total": 5945,
      "eval_best_exact": 67.98618714730902,
      "eval_best_exact_thresh": 0.0,
      "eval_best_f1": 71.31972486020774,
      "eval_best_f1_thresh": 0.0,
      "eval_exact": 67.98618714730902,
      "eval_f1": 71.31972486020777,
      "eval_runtime": 19.136,
      "eval_samples_per_second": 634.092,
      "eval_steps_per_second": 4.964,
      "eval_total": 11873,
      "step": 32940
    },
    {
      "SuperNet": "OrderedDict([(<ElasticityDim.WIDTH: 'width'>, {0: 512, 1: 512, 2: 512, 3: 512, 4: 512, 5: 512, 6: 512, 7: 512, 8: 2048, 9: 2048, 10: 2048, 11: 2048, 12: 2048, 13: 2048, 14: 2048, 15: 2048})])",
      "epoch": 4.0,
      "eval_HasAns_exact": 71.89608636977059,
      "eval_HasAns_f1": 78.17919334933525,
      "eval_HasAns_total": 5928,
      "eval_NoAns_exact": 67.70395290159799,
      "eval_NoAns_f1": 67.70395290159799,
      "eval_NoAns_total": 5945,
      "eval_best_exact": 69.79701844521182,
      "eval_best_exact_thresh": 0.0,
      "eval_best_f1": 72.93407379557496,
      "eval_best_f1_thresh": 0.0,
      "eval_exact": 69.79701844521182,
      "eval_f1": 72.93407379557496,
      "eval_runtime": 20.2232,
      "eval_samples_per_second": 600.004,
      "eval_steps_per_second": 4.698,
      "eval_total": 11873,
      "step": 32940
    },
    {
      "compression_loss": 0.0,
      "epoch": 4.01,
      "learning_rate": 2.8378933458497492e-06,
      "loss": 0.6269,
      "step": 33000
    },
    {
      "compression_loss": 0.0,
      "epoch": 4.07,
      "learning_rate": 2.5125184709830478e-06,
      "loss": 0.5658,
      "step": 33500
    },
    {
      "compression_loss": 0.0,
      "epoch": 4.13,
      "learning_rate": 2.204645347373324e-06,
      "loss": 0.5484,
      "step": 34000
    },
    {
      "compression_loss": 0.0,
      "epoch": 4.19,
      "learning_rate": 1.9153919029589925e-06,
      "loss": 0.5433,
      "step": 34500
    },
    {
      "compression_loss": 0.0,
      "epoch": 4.25,
      "learning_rate": 1.6451790566193852e-06,
      "loss": 0.5692,
      "step": 35000
    },
    {
      "compression_loss": 0.0,
      "epoch": 4.31,
      "learning_rate": 1.3944000195354374e-06,
      "loss": 0.5627,
      "step": 35500
    },
    {
      "compression_loss": 0.0,
      "epoch": 4.37,
      "learning_rate": 1.1638616961868014e-06,
      "loss": 0.5623,
      "step": 36000
    },
    {
      "compression_loss": 0.0,
      "epoch": 4.43,
      "learning_rate": 9.53778694922398e-07,
      "loss": 0.5493,
      "step": 36500
    },
    {
      "compression_loss": 0.0,
      "epoch": 4.49,
      "learning_rate": 7.632514341613572e-07,
      "loss": 0.5609,
      "step": 37000
    },
    {
      "compression_loss": 0.0,
      "epoch": 4.55,
      "learning_rate": 5.934413553993973e-07,
      "loss": 0.5642,
      "step": 37500
    },
    {
      "compression_loss": 0.0,
      "epoch": 4.61,
      "learning_rate": 4.445955646587174e-07,
      "loss": 0.5642,
      "step": 38000
    },
    {
      "compression_loss": 0.0,
      "epoch": 4.68,
      "learning_rate": 3.169306609276462e-07,
      "loss": 0.5636,
      "step": 38500
    },
    {
      "compression_loss": 0.0,
      "epoch": 4.74,
      "learning_rate": 2.1063242096787116e-07,
      "loss": 0.5666,
      "step": 39000
    },
    {
      "compression_loss": 0.0,
      "epoch": 4.8,
      "learning_rate": 1.258555289738389e-07,
      "loss": 0.5576,
      "step": 39500
    },
    {
      "compression_loss": 0.0,
      "epoch": 4.86,
      "learning_rate": 6.272335147777586e-08,
      "loss": 0.5597,
      "step": 40000
    },
    {
      "compression_loss": 0.0,
      "epoch": 4.92,
      "learning_rate": 2.1327757827879212e-08,
      "loss": 0.5437,
      "step": 40500
    },
    {
      "compression_loss": 0.0,
      "epoch": 4.98,
      "learning_rate": 1.7289865009056939e-09,
      "loss": 0.569,
      "step": 41000
    },
    {
      "Minimum SubNet": "OrderedDict([(<ElasticityDim.WIDTH: 'width'>, {0: 384, 1: 192, 2: 384, 3: 448, 4: 448, 5: 384, 6: 448, 7: 448, 8: 610, 9: 544, 10: 631, 11: 653, 12: 591, 13: 549, 14: 508, 15: 485})])",
      "epoch": 5.0,
      "eval_HasAns_exact": 67.6450742240216,
      "eval_HasAns_f1": 74.25189753904719,
      "eval_HasAns_total": 5928,
      "eval_NoAns_exact": 68.56181665264928,
      "eval_NoAns_f1": 68.56181665264928,
      "eval_NoAns_total": 5945,
      "eval_best_exact": 68.10410174345152,
      "eval_best_exact_thresh": 0.0,
      "eval_best_f1": 71.40278350976789,
      "eval_best_f1_thresh": 0.0,
      "eval_exact": 68.10410174345152,
      "eval_f1": 71.40278350976786,
      "eval_runtime": 15.8145,
      "eval_samples_per_second": 767.268,
      "eval_steps_per_second": 6.007,
      "eval_total": 11873,
      "step": 41175
    },
    {
      "SuperNet": "OrderedDict([(<ElasticityDim.WIDTH: 'width'>, {0: 512, 1: 512, 2: 512, 3: 512, 4: 512, 5: 512, 6: 512, 7: 512, 8: 2048, 9: 2048, 10: 2048, 11: 2048, 12: 2048, 13: 2048, 14: 2048, 15: 2048})])",
      "epoch": 5.0,
      "eval_HasAns_exact": 71.44062078272604,
      "eval_HasAns_f1": 77.61788590740903,
      "eval_HasAns_total": 5928,
      "eval_NoAns_exact": 69.38603868797308,
      "eval_NoAns_f1": 69.38603868797308,
      "eval_NoAns_total": 5945,
      "eval_best_exact": 70.41185883938347,
      "eval_best_exact_thresh": 0.0,
      "eval_best_f1": 73.49606903555316,
      "eval_best_f1_thresh": 0.0,
      "eval_exact": 70.41185883938347,
      "eval_f1": 73.49606903555318,
      "eval_runtime": 19.4373,
      "eval_samples_per_second": 624.265,
      "eval_steps_per_second": 4.888,
      "eval_total": 11873,
      "step": 41175
    },
    {
      "epoch": 5.0,
      "step": 41175,
      "total_flos": 3.828077231602176e+16,
      "train_loss": 1.1978534093045001,
      "train_runtime": 40881.8794,
      "train_samples_per_second": 16.114,
      "train_steps_per_second": 1.007
    }
  ],
  "max_steps": 41175,
  "min_subnet_acc": null,
  "min_subnet_best_acc": null,
  "num_train_epochs": 5,
  "supernet_acc": null,
  "supernet_best_acc": null,
  "total_flos": 3.828077231602176e+16,
  "trial_name": null,
  "trial_params": null
}