{
  "best_metric": 0.9816143497757848,
  "best_model_checkpoint": "swin-tiny-patch4-window7-224-finetuned-eurosat/checkpoint-7800",
  "epoch": 99.36305732484077,
  "eval_steps": 500,
  "global_step": 7800,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.13,
      "grad_norm": 2.7006075382232666,
      "learning_rate": 6.41025641025641e-07,
      "loss": 1.0769,
      "step": 10
    },
    {
      "epoch": 0.25,
      "grad_norm": 2.1050777435302734,
      "learning_rate": 1.282051282051282e-06,
      "loss": 1.0669,
      "step": 20
    },
    {
      "epoch": 0.38,
      "grad_norm": 3.808070182800293,
      "learning_rate": 1.9230769230769234e-06,
      "loss": 1.0657,
      "step": 30
    },
    {
      "epoch": 0.51,
      "grad_norm": 2.6294384002685547,
      "learning_rate": 2.564102564102564e-06,
      "loss": 1.0561,
      "step": 40
    },
    {
      "epoch": 0.64,
      "grad_norm": 4.274094104766846,
      "learning_rate": 3.205128205128205e-06,
      "loss": 1.0414,
      "step": 50
    },
    {
      "epoch": 0.76,
      "grad_norm": 2.047131061553955,
      "learning_rate": 3.846153846153847e-06,
      "loss": 1.0187,
      "step": 60
    },
    {
      "epoch": 0.89,
      "grad_norm": 2.6211767196655273,
      "learning_rate": 4.487179487179488e-06,
      "loss": 1.0105,
      "step": 70
    },
    {
      "epoch": 0.99,
      "eval_accuracy": 0.5251121076233184,
      "eval_loss": 0.9788457155227661,
      "eval_runtime": 11.1593,
      "eval_samples_per_second": 399.665,
      "eval_steps_per_second": 3.136,
      "step": 78
    },
    {
      "epoch": 1.02,
      "grad_norm": 2.5963022708892822,
      "learning_rate": 5.128205128205128e-06,
      "loss": 0.9866,
      "step": 80
    },
    {
      "epoch": 1.15,
      "grad_norm": 4.090935230255127,
      "learning_rate": 5.76923076923077e-06,
      "loss": 0.9689,
      "step": 90
    },
    {
      "epoch": 1.27,
      "grad_norm": 3.1825904846191406,
      "learning_rate": 6.41025641025641e-06,
      "loss": 0.9478,
      "step": 100
    },
    {
      "epoch": 1.4,
      "grad_norm": 3.811648368835449,
      "learning_rate": 7.051282051282052e-06,
      "loss": 0.933,
      "step": 110
    },
    {
      "epoch": 1.53,
      "grad_norm": 3.8240082263946533,
      "learning_rate": 7.692307692307694e-06,
      "loss": 0.9141,
      "step": 120
    },
    {
      "epoch": 1.66,
      "grad_norm": 3.7604594230651855,
      "learning_rate": 8.333333333333334e-06,
      "loss": 0.8997,
      "step": 130
    },
    {
      "epoch": 1.78,
      "grad_norm": 5.058235168457031,
      "learning_rate": 8.974358974358976e-06,
      "loss": 0.8839,
      "step": 140
    },
    {
      "epoch": 1.91,
      "grad_norm": 4.036779403686523,
      "learning_rate": 9.615384615384616e-06,
      "loss": 0.8749,
      "step": 150
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.6322869955156951,
      "eval_loss": 0.8166685104370117,
      "eval_runtime": 10.7913,
      "eval_samples_per_second": 413.295,
      "eval_steps_per_second": 3.243,
      "step": 157
    },
    {
      "epoch": 2.04,
      "grad_norm": 3.1002399921417236,
      "learning_rate": 1.0256410256410256e-05,
      "loss": 0.8482,
      "step": 160
    },
    {
      "epoch": 2.17,
      "grad_norm": 4.336329460144043,
      "learning_rate": 1.0897435897435898e-05,
      "loss": 0.835,
      "step": 170
    },
    {
      "epoch": 2.29,
      "grad_norm": 4.344544410705566,
      "learning_rate": 1.153846153846154e-05,
      "loss": 0.7967,
      "step": 180
    },
    {
      "epoch": 2.42,
      "grad_norm": 5.955235481262207,
      "learning_rate": 1.217948717948718e-05,
      "loss": 0.7853,
      "step": 190
    },
    {
      "epoch": 2.55,
      "grad_norm": 6.097214698791504,
      "learning_rate": 1.282051282051282e-05,
      "loss": 0.7587,
      "step": 200
    },
    {
      "epoch": 2.68,
      "grad_norm": 6.144845485687256,
      "learning_rate": 1.3461538461538462e-05,
      "loss": 0.7538,
      "step": 210
    },
    {
      "epoch": 2.8,
      "grad_norm": 6.462852954864502,
      "learning_rate": 1.4102564102564104e-05,
      "loss": 0.7286,
      "step": 220
    },
    {
      "epoch": 2.93,
      "grad_norm": 5.17026424407959,
      "learning_rate": 1.4743589743589745e-05,
      "loss": 0.7352,
      "step": 230
    },
    {
      "epoch": 2.99,
      "eval_accuracy": 0.7179372197309417,
      "eval_loss": 0.6497997641563416,
      "eval_runtime": 10.8789,
      "eval_samples_per_second": 409.968,
      "eval_steps_per_second": 3.217,
      "step": 235
    },
    {
      "epoch": 3.06,
      "grad_norm": 5.981796741485596,
      "learning_rate": 1.5384615384615387e-05,
      "loss": 0.6987,
      "step": 240
    },
    {
      "epoch": 3.18,
      "grad_norm": 6.265073299407959,
      "learning_rate": 1.602564102564103e-05,
      "loss": 0.6902,
      "step": 250
    },
    {
      "epoch": 3.31,
      "grad_norm": 3.7911741733551025,
      "learning_rate": 1.6666666666666667e-05,
      "loss": 0.6888,
      "step": 260
    },
    {
      "epoch": 3.44,
      "grad_norm": 5.221859455108643,
      "learning_rate": 1.730769230769231e-05,
      "loss": 0.6771,
      "step": 270
    },
    {
      "epoch": 3.57,
      "grad_norm": 4.222895622253418,
      "learning_rate": 1.794871794871795e-05,
      "loss": 0.6579,
      "step": 280
    },
    {
      "epoch": 3.69,
      "grad_norm": 4.25372314453125,
      "learning_rate": 1.858974358974359e-05,
      "loss": 0.6529,
      "step": 290
    },
    {
      "epoch": 3.82,
      "grad_norm": 6.471427917480469,
      "learning_rate": 1.923076923076923e-05,
      "loss": 0.6488,
      "step": 300
    },
    {
      "epoch": 3.95,
      "grad_norm": 3.6297073364257812,
      "learning_rate": 1.987179487179487e-05,
      "loss": 0.647,
      "step": 310
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.7630044843049327,
      "eval_loss": 0.5618513226509094,
      "eval_runtime": 10.9103,
      "eval_samples_per_second": 408.789,
      "eval_steps_per_second": 3.208,
      "step": 314
    },
    {
      "epoch": 4.08,
      "grad_norm": 3.593230724334717,
      "learning_rate": 2.0512820512820512e-05,
      "loss": 0.6343,
      "step": 320
    },
    {
      "epoch": 4.2,
      "grad_norm": 5.996413230895996,
      "learning_rate": 2.1153846153846154e-05,
      "loss": 0.6266,
      "step": 330
    },
    {
      "epoch": 4.33,
      "grad_norm": 3.9877495765686035,
      "learning_rate": 2.1794871794871795e-05,
      "loss": 0.629,
      "step": 340
    },
    {
      "epoch": 4.46,
      "grad_norm": 3.9979002475738525,
      "learning_rate": 2.2435897435897437e-05,
      "loss": 0.6199,
      "step": 350
    },
    {
      "epoch": 4.59,
      "grad_norm": 5.1187639236450195,
      "learning_rate": 2.307692307692308e-05,
      "loss": 0.601,
      "step": 360
    },
    {
      "epoch": 4.71,
      "grad_norm": 3.832275629043579,
      "learning_rate": 2.3717948717948718e-05,
      "loss": 0.601,
      "step": 370
    },
    {
      "epoch": 4.84,
      "grad_norm": 3.131157875061035,
      "learning_rate": 2.435897435897436e-05,
      "loss": 0.5921,
      "step": 380
    },
    {
      "epoch": 4.97,
      "grad_norm": 5.246669769287109,
      "learning_rate": 2.5e-05,
      "loss": 0.5949,
      "step": 390
    },
    {
      "epoch": 4.99,
      "eval_accuracy": 0.7892376681614349,
      "eval_loss": 0.5067703127861023,
      "eval_runtime": 10.7897,
      "eval_samples_per_second": 413.359,
      "eval_steps_per_second": 3.244,
      "step": 392
    },
    {
      "epoch": 5.1,
      "grad_norm": 3.668297052383423,
      "learning_rate": 2.564102564102564e-05,
      "loss": 0.5841,
      "step": 400
    },
    {
      "epoch": 5.22,
      "grad_norm": 3.633014440536499,
      "learning_rate": 2.6282051282051285e-05,
      "loss": 0.5847,
      "step": 410
    },
    {
      "epoch": 5.35,
      "grad_norm": 4.339975833892822,
      "learning_rate": 2.6923076923076923e-05,
      "loss": 0.5921,
      "step": 420
    },
    {
      "epoch": 5.48,
      "grad_norm": 4.13716459274292,
      "learning_rate": 2.756410256410257e-05,
      "loss": 0.5648,
      "step": 430
    },
    {
      "epoch": 5.61,
      "grad_norm": 5.4236063957214355,
      "learning_rate": 2.8205128205128207e-05,
      "loss": 0.5708,
      "step": 440
    },
    {
      "epoch": 5.73,
      "grad_norm": 4.541626453399658,
      "learning_rate": 2.8846153846153845e-05,
      "loss": 0.5835,
      "step": 450
    },
    {
      "epoch": 5.86,
      "grad_norm": 5.133627891540527,
      "learning_rate": 2.948717948717949e-05,
      "loss": 0.5705,
      "step": 460
    },
    {
      "epoch": 5.99,
      "grad_norm": 4.043761253356934,
      "learning_rate": 3.012820512820513e-05,
      "loss": 0.5773,
      "step": 470
    },
    {
      "epoch": 6.0,
      "eval_accuracy": 0.805829596412556,
      "eval_loss": 0.4673232436180115,
      "eval_runtime": 10.582,
      "eval_samples_per_second": 421.47,
      "eval_steps_per_second": 3.307,
      "step": 471
    },
    {
      "epoch": 6.11,
      "grad_norm": 4.192721843719482,
      "learning_rate": 3.0769230769230774e-05,
      "loss": 0.5543,
      "step": 480
    },
    {
      "epoch": 6.24,
      "grad_norm": 5.365765571594238,
      "learning_rate": 3.141025641025641e-05,
      "loss": 0.5417,
      "step": 490
    },
    {
      "epoch": 6.37,
      "grad_norm": 5.424711227416992,
      "learning_rate": 3.205128205128206e-05,
      "loss": 0.5687,
      "step": 500
    },
    {
      "epoch": 6.5,
      "grad_norm": 4.212834358215332,
      "learning_rate": 3.269230769230769e-05,
      "loss": 0.5573,
      "step": 510
    },
    {
      "epoch": 6.62,
      "grad_norm": 4.826545238494873,
      "learning_rate": 3.3333333333333335e-05,
      "loss": 0.5387,
      "step": 520
    },
    {
      "epoch": 6.75,
      "grad_norm": 4.972387313842773,
      "learning_rate": 3.397435897435898e-05,
      "loss": 0.5361,
      "step": 530
    },
    {
      "epoch": 6.88,
      "grad_norm": 4.788640022277832,
      "learning_rate": 3.461538461538462e-05,
      "loss": 0.5401,
      "step": 540
    },
    {
      "epoch": 6.99,
      "eval_accuracy": 0.8210762331838565,
      "eval_loss": 0.4277366101741791,
      "eval_runtime": 10.3618,
      "eval_samples_per_second": 430.427,
      "eval_steps_per_second": 3.378,
      "step": 549
    },
    {
      "epoch": 7.01,
      "grad_norm": 5.113460540771484,
      "learning_rate": 3.525641025641026e-05,
      "loss": 0.5512,
      "step": 550
    },
    {
      "epoch": 7.13,
      "grad_norm": 4.552058696746826,
      "learning_rate": 3.58974358974359e-05,
      "loss": 0.5286,
      "step": 560
    },
    {
      "epoch": 7.26,
      "grad_norm": 3.3090872764587402,
      "learning_rate": 3.653846153846154e-05,
      "loss": 0.5329,
      "step": 570
    },
    {
      "epoch": 7.39,
      "grad_norm": 4.843341827392578,
      "learning_rate": 3.717948717948718e-05,
      "loss": 0.5134,
      "step": 580
    },
    {
      "epoch": 7.52,
      "grad_norm": 4.348685264587402,
      "learning_rate": 3.782051282051282e-05,
      "loss": 0.525,
      "step": 590
    },
    {
      "epoch": 7.64,
      "grad_norm": 5.437953472137451,
      "learning_rate": 3.846153846153846e-05,
      "loss": 0.5024,
      "step": 600
    },
    {
      "epoch": 7.77,
      "grad_norm": 4.2977986335754395,
      "learning_rate": 3.9102564102564105e-05,
      "loss": 0.538,
      "step": 610
    },
    {
      "epoch": 7.9,
      "grad_norm": 3.921816110610962,
      "learning_rate": 3.974358974358974e-05,
      "loss": 0.5116,
      "step": 620
    },
    {
      "epoch": 8.0,
      "eval_accuracy": 0.831390134529148,
      "eval_loss": 0.40886422991752625,
      "eval_runtime": 10.3582,
      "eval_samples_per_second": 430.575,
      "eval_steps_per_second": 3.379,
      "step": 628
    },
    {
      "epoch": 8.03,
      "grad_norm": 3.6436734199523926,
      "learning_rate": 4.038461538461539e-05,
      "loss": 0.4756,
      "step": 630
    },
    {
      "epoch": 8.15,
      "grad_norm": 7.276066303253174,
      "learning_rate": 4.1025641025641023e-05,
      "loss": 0.5389,
      "step": 640
    },
    {
      "epoch": 8.28,
      "grad_norm": 5.59036922454834,
      "learning_rate": 4.166666666666667e-05,
      "loss": 0.5104,
      "step": 650
    },
    {
      "epoch": 8.41,
      "grad_norm": 5.391291618347168,
      "learning_rate": 4.230769230769231e-05,
      "loss": 0.4793,
      "step": 660
    },
    {
      "epoch": 8.54,
      "grad_norm": 3.3373210430145264,
      "learning_rate": 4.294871794871795e-05,
      "loss": 0.4997,
      "step": 670
    },
    {
      "epoch": 8.66,
      "grad_norm": 3.6154773235321045,
      "learning_rate": 4.358974358974359e-05,
      "loss": 0.4856,
      "step": 680
    },
    {
      "epoch": 8.79,
      "grad_norm": 4.880829811096191,
      "learning_rate": 4.423076923076923e-05,
      "loss": 0.4944,
      "step": 690
    },
    {
      "epoch": 8.92,
      "grad_norm": 4.5230584144592285,
      "learning_rate": 4.4871794871794874e-05,
      "loss": 0.4751,
      "step": 700
    },
    {
      "epoch": 8.99,
      "eval_accuracy": 0.8399103139013453,
      "eval_loss": 0.3890719711780548,
      "eval_runtime": 10.3075,
      "eval_samples_per_second": 432.694,
      "eval_steps_per_second": 3.396,
      "step": 706
    },
    {
      "epoch": 9.04,
      "grad_norm": 3.3694958686828613,
      "learning_rate": 4.5512820512820516e-05,
      "loss": 0.4895,
      "step": 710
    },
    {
      "epoch": 9.17,
      "grad_norm": 4.626150608062744,
      "learning_rate": 4.615384615384616e-05,
      "loss": 0.4721,
      "step": 720
    },
    {
      "epoch": 9.3,
      "grad_norm": 3.687791109085083,
      "learning_rate": 4.67948717948718e-05,
      "loss": 0.4672,
      "step": 730
    },
    {
      "epoch": 9.43,
      "grad_norm": 3.2490782737731934,
      "learning_rate": 4.7435897435897435e-05,
      "loss": 0.4665,
      "step": 740
    },
    {
      "epoch": 9.55,
      "grad_norm": 4.15757417678833,
      "learning_rate": 4.8076923076923084e-05,
      "loss": 0.4646,
      "step": 750
    },
    {
      "epoch": 9.68,
      "grad_norm": 6.2055864334106445,
      "learning_rate": 4.871794871794872e-05,
      "loss": 0.4853,
      "step": 760
    },
    {
      "epoch": 9.81,
      "grad_norm": 3.2382359504699707,
      "learning_rate": 4.935897435897436e-05,
      "loss": 0.4837,
      "step": 770
    },
    {
      "epoch": 9.94,
      "grad_norm": 4.219330310821533,
      "learning_rate": 5e-05,
      "loss": 0.4659,
      "step": 780
    },
    {
      "epoch": 10.0,
      "eval_accuracy": 0.8573991031390135,
      "eval_loss": 0.3598979413509369,
      "eval_runtime": 10.2377,
      "eval_samples_per_second": 435.643,
      "eval_steps_per_second": 3.419,
      "step": 785
    },
    {
      "epoch": 10.06,
      "grad_norm": 3.231353759765625,
      "learning_rate": 4.992877492877493e-05,
      "loss": 0.4682,
      "step": 790
    },
    {
      "epoch": 10.19,
      "grad_norm": 4.601670742034912,
      "learning_rate": 4.985754985754986e-05,
      "loss": 0.4553,
      "step": 800
    },
    {
      "epoch": 10.32,
      "grad_norm": 4.098852634429932,
      "learning_rate": 4.978632478632479e-05,
      "loss": 0.4374,
      "step": 810
    },
    {
      "epoch": 10.45,
      "grad_norm": 3.5977771282196045,
      "learning_rate": 4.971509971509972e-05,
      "loss": 0.4478,
      "step": 820
    },
    {
      "epoch": 10.57,
      "grad_norm": 4.338653564453125,
      "learning_rate": 4.9643874643874646e-05,
      "loss": 0.4441,
      "step": 830
    },
    {
      "epoch": 10.7,
      "grad_norm": 4.484642505645752,
      "learning_rate": 4.9572649572649575e-05,
      "loss": 0.4597,
      "step": 840
    },
    {
      "epoch": 10.83,
      "grad_norm": 4.2885966300964355,
      "learning_rate": 4.95014245014245e-05,
      "loss": 0.4315,
      "step": 850
    },
    {
      "epoch": 10.96,
      "grad_norm": 5.804717540740967,
      "learning_rate": 4.943019943019943e-05,
      "loss": 0.4457,
      "step": 860
    },
    {
      "epoch": 10.99,
      "eval_accuracy": 0.8591928251121076,
      "eval_loss": 0.34933042526245117,
      "eval_runtime": 10.3712,
      "eval_samples_per_second": 430.038,
      "eval_steps_per_second": 3.375,
      "step": 863
    },
    {
      "epoch": 11.08,
      "grad_norm": 3.4534051418304443,
      "learning_rate": 4.935897435897436e-05,
      "loss": 0.431,
      "step": 870
    },
    {
      "epoch": 11.21,
      "grad_norm": 4.114124774932861,
      "learning_rate": 4.928774928774929e-05,
      "loss": 0.4171,
      "step": 880
    },
    {
      "epoch": 11.34,
      "grad_norm": 12.598810195922852,
      "learning_rate": 4.921652421652422e-05,
      "loss": 0.445,
      "step": 890
    },
    {
      "epoch": 11.46,
      "grad_norm": 3.874229907989502,
      "learning_rate": 4.9145299145299147e-05,
      "loss": 0.397,
      "step": 900
    },
    {
      "epoch": 11.59,
      "grad_norm": 4.273011684417725,
      "learning_rate": 4.9074074074074075e-05,
      "loss": 0.413,
      "step": 910
    },
    {
      "epoch": 11.72,
      "grad_norm": 3.6375491619110107,
      "learning_rate": 4.9002849002849004e-05,
      "loss": 0.4079,
      "step": 920
    },
    {
      "epoch": 11.85,
      "grad_norm": 4.725768089294434,
      "learning_rate": 4.893162393162393e-05,
      "loss": 0.437,
      "step": 930
    },
    {
      "epoch": 11.97,
      "grad_norm": 5.251430988311768,
      "learning_rate": 4.886039886039887e-05,
      "loss": 0.4021,
      "step": 940
    },
    {
      "epoch": 12.0,
      "eval_accuracy": 0.8751121076233184,
      "eval_loss": 0.317982017993927,
      "eval_runtime": 10.1336,
      "eval_samples_per_second": 440.118,
      "eval_steps_per_second": 3.454,
      "step": 942
    },
    {
      "epoch": 12.1,
      "grad_norm": 3.173100471496582,
      "learning_rate": 4.878917378917379e-05,
      "loss": 0.3791,
      "step": 950
    },
    {
      "epoch": 12.23,
      "grad_norm": 4.874154090881348,
      "learning_rate": 4.871794871794872e-05,
      "loss": 0.4091,
      "step": 960
    },
    {
      "epoch": 12.36,
      "grad_norm": 3.875936985015869,
      "learning_rate": 4.864672364672365e-05,
      "loss": 0.4073,
      "step": 970
    },
    {
      "epoch": 12.48,
      "grad_norm": 4.053490161895752,
      "learning_rate": 4.8575498575498576e-05,
      "loss": 0.3908,
      "step": 980
    },
    {
      "epoch": 12.61,
      "grad_norm": 4.173293113708496,
      "learning_rate": 4.8504273504273505e-05,
      "loss": 0.3962,
      "step": 990
    },
    {
      "epoch": 12.74,
      "grad_norm": 2.876476764678955,
      "learning_rate": 4.8433048433048433e-05,
      "loss": 0.409,
      "step": 1000
    },
    {
      "epoch": 12.87,
      "grad_norm": 4.63971471786499,
      "learning_rate": 4.836182336182337e-05,
      "loss": 0.3814,
      "step": 1010
    },
    {
      "epoch": 12.99,
      "grad_norm": 4.614744663238525,
      "learning_rate": 4.829059829059829e-05,
      "loss": 0.3925,
      "step": 1020
    },
    {
      "epoch": 12.99,
      "eval_accuracy": 0.886322869955157,
      "eval_loss": 0.2823837697505951,
      "eval_runtime": 10.629,
      "eval_samples_per_second": 419.606,
      "eval_steps_per_second": 3.293,
      "step": 1020
    },
    {
      "epoch": 13.12,
      "grad_norm": 3.8458189964294434,
      "learning_rate": 4.821937321937322e-05,
      "loss": 0.3691,
      "step": 1030
    },
    {
      "epoch": 13.25,
      "grad_norm": 5.067654609680176,
      "learning_rate": 4.814814814814815e-05,
      "loss": 0.3906,
      "step": 1040
    },
    {
      "epoch": 13.38,
      "grad_norm": 3.770333766937256,
      "learning_rate": 4.8076923076923084e-05,
      "loss": 0.3871,
      "step": 1050
    },
    {
      "epoch": 13.5,
      "grad_norm": 3.551605463027954,
      "learning_rate": 4.8005698005698006e-05,
      "loss": 0.3768,
      "step": 1060
    },
    {
      "epoch": 13.63,
      "grad_norm": 4.692648887634277,
      "learning_rate": 4.7934472934472934e-05,
      "loss": 0.3758,
      "step": 1070
    },
    {
      "epoch": 13.76,
      "grad_norm": 4.032926082611084,
      "learning_rate": 4.786324786324787e-05,
      "loss": 0.3616,
      "step": 1080
    },
    {
      "epoch": 13.89,
      "grad_norm": 3.6287736892700195,
      "learning_rate": 4.779202279202279e-05,
      "loss": 0.3737,
      "step": 1090
    },
    {
      "epoch": 14.0,
      "eval_accuracy": 0.8959641255605382,
      "eval_loss": 0.2685752809047699,
      "eval_runtime": 10.3242,
      "eval_samples_per_second": 431.993,
      "eval_steps_per_second": 3.39,
      "step": 1099
    },
    {
      "epoch": 14.01,
      "grad_norm": 4.15897274017334,
      "learning_rate": 4.772079772079772e-05,
      "loss": 0.3678,
      "step": 1100
    },
    {
      "epoch": 14.14,
      "grad_norm": 5.19406795501709,
      "learning_rate": 4.764957264957265e-05,
      "loss": 0.3729,
      "step": 1110
    },
    {
      "epoch": 14.27,
      "grad_norm": 3.7079544067382812,
      "learning_rate": 4.7578347578347584e-05,
      "loss": 0.3597,
      "step": 1120
    },
    {
      "epoch": 14.39,
      "grad_norm": 5.892209053039551,
      "learning_rate": 4.7507122507122506e-05,
      "loss": 0.358,
      "step": 1130
    },
    {
      "epoch": 14.52,
      "grad_norm": 3.5323147773742676,
      "learning_rate": 4.7435897435897435e-05,
      "loss": 0.3534,
      "step": 1140
    },
    {
      "epoch": 14.65,
      "grad_norm": 5.116568565368652,
      "learning_rate": 4.736467236467237e-05,
      "loss": 0.3418,
      "step": 1150
    },
    {
      "epoch": 14.78,
      "grad_norm": 2.994664430618286,
      "learning_rate": 4.72934472934473e-05,
      "loss": 0.3647,
      "step": 1160
    },
    {
      "epoch": 14.9,
      "grad_norm": 5.119503974914551,
      "learning_rate": 4.722222222222222e-05,
      "loss": 0.3573,
      "step": 1170
    },
    {
      "epoch": 14.99,
      "eval_accuracy": 0.8977578475336323,
      "eval_loss": 0.2508743107318878,
      "eval_runtime": 10.38,
      "eval_samples_per_second": 429.674,
      "eval_steps_per_second": 3.372,
      "step": 1177
    },
    {
      "epoch": 15.03,
      "grad_norm": 3.1542904376983643,
      "learning_rate": 4.7150997150997157e-05,
      "loss": 0.3428,
      "step": 1180
    },
    {
      "epoch": 15.16,
      "grad_norm": 3.757366418838501,
      "learning_rate": 4.7079772079772085e-05,
      "loss": 0.3365,
      "step": 1190
    },
    {
      "epoch": 15.29,
      "grad_norm": 3.979665517807007,
      "learning_rate": 4.700854700854701e-05,
      "loss": 0.3345,
      "step": 1200
    },
    {
      "epoch": 15.41,
      "grad_norm": 4.517212867736816,
      "learning_rate": 4.6937321937321936e-05,
      "loss": 0.3251,
      "step": 1210
    },
    {
      "epoch": 15.54,
      "grad_norm": 3.9786815643310547,
      "learning_rate": 4.686609686609687e-05,
      "loss": 0.338,
      "step": 1220
    },
    {
      "epoch": 15.67,
      "grad_norm": 6.16834831237793,
      "learning_rate": 4.67948717948718e-05,
      "loss": 0.3425,
      "step": 1230
    },
    {
      "epoch": 15.8,
      "grad_norm": 3.120864152908325,
      "learning_rate": 4.672364672364672e-05,
      "loss": 0.3374,
      "step": 1240
    },
    {
      "epoch": 15.92,
      "grad_norm": 5.18550443649292,
      "learning_rate": 4.665242165242166e-05,
      "loss": 0.3463,
      "step": 1250
    },
    {
      "epoch": 16.0,
      "eval_accuracy": 0.9100896860986547,
      "eval_loss": 0.22138647735118866,
      "eval_runtime": 10.4458,
      "eval_samples_per_second": 426.965,
      "eval_steps_per_second": 3.351,
      "step": 1256
    },
    {
      "epoch": 16.05,
      "grad_norm": 3.5026066303253174,
      "learning_rate": 4.6581196581196586e-05,
      "loss": 0.3289,
      "step": 1260
    },
    {
      "epoch": 16.18,
      "grad_norm": 3.7427220344543457,
      "learning_rate": 4.6509971509971515e-05,
      "loss": 0.3376,
      "step": 1270
    },
    {
      "epoch": 16.31,
      "grad_norm": 4.764074325561523,
      "learning_rate": 4.643874643874644e-05,
      "loss": 0.3082,
      "step": 1280
    },
    {
      "epoch": 16.43,
      "grad_norm": 5.684610366821289,
      "learning_rate": 4.636752136752137e-05,
      "loss": 0.307,
      "step": 1290
    },
    {
      "epoch": 16.56,
      "grad_norm": 3.970574140548706,
      "learning_rate": 4.62962962962963e-05,
      "loss": 0.3215,
      "step": 1300
    },
    {
      "epoch": 16.69,
      "grad_norm": 3.9064781665802,
      "learning_rate": 4.622507122507122e-05,
      "loss": 0.3219,
      "step": 1310
    },
    {
      "epoch": 16.82,
      "grad_norm": 5.803124904632568,
      "learning_rate": 4.615384615384616e-05,
      "loss": 0.316,
      "step": 1320
    },
    {
      "epoch": 16.94,
      "grad_norm": 5.820592880249023,
      "learning_rate": 4.608262108262109e-05,
      "loss": 0.3174,
      "step": 1330
    },
    {
      "epoch": 16.99,
      "eval_accuracy": 0.9143497757847534,
      "eval_loss": 0.22333355247974396,
      "eval_runtime": 10.5076,
      "eval_samples_per_second": 424.456,
      "eval_steps_per_second": 3.331,
      "step": 1334
    },
    {
      "epoch": 17.07,
      "grad_norm": 3.744340419769287,
      "learning_rate": 4.6011396011396016e-05,
      "loss": 0.3105,
      "step": 1340
    },
    {
      "epoch": 17.2,
      "grad_norm": 4.38726806640625,
      "learning_rate": 4.594017094017094e-05,
      "loss": 0.3017,
      "step": 1350
    },
    {
      "epoch": 17.32,
      "grad_norm": 3.789621353149414,
      "learning_rate": 4.586894586894587e-05,
      "loss": 0.3139,
      "step": 1360
    },
    {
      "epoch": 17.45,
      "grad_norm": 4.819761753082275,
      "learning_rate": 4.57977207977208e-05,
      "loss": 0.2945,
      "step": 1370
    },
    {
      "epoch": 17.58,
      "grad_norm": 4.197057723999023,
      "learning_rate": 4.572649572649573e-05,
      "loss": 0.3118,
      "step": 1380
    },
    {
      "epoch": 17.71,
      "grad_norm": 4.414229393005371,
      "learning_rate": 4.565527065527066e-05,
      "loss": 0.3006,
      "step": 1390
    },
    {
      "epoch": 17.83,
      "grad_norm": 6.089674472808838,
      "learning_rate": 4.558404558404559e-05,
      "loss": 0.3064,
      "step": 1400
    },
    {
      "epoch": 17.96,
      "grad_norm": 3.0857927799224854,
      "learning_rate": 4.5512820512820516e-05,
      "loss": 0.3029,
      "step": 1410
    },
    {
      "epoch": 18.0,
      "eval_accuracy": 0.9221973094170404,
      "eval_loss": 0.20649677515029907,
      "eval_runtime": 10.5315,
      "eval_samples_per_second": 423.491,
      "eval_steps_per_second": 3.323,
      "step": 1413
    },
    {
      "epoch": 18.09,
      "grad_norm": 3.7757556438446045,
      "learning_rate": 4.544159544159544e-05,
      "loss": 0.3047,
      "step": 1420
    },
    {
      "epoch": 18.22,
      "grad_norm": 3.210659980773926,
      "learning_rate": 4.5370370370370374e-05,
      "loss": 0.2969,
      "step": 1430
    },
    {
      "epoch": 18.34,
      "grad_norm": 3.7553064823150635,
      "learning_rate": 4.52991452991453e-05,
      "loss": 0.2778,
      "step": 1440
    },
    {
      "epoch": 18.47,
      "grad_norm": 4.472036361694336,
      "learning_rate": 4.522792022792023e-05,
      "loss": 0.2919,
      "step": 1450
    },
    {
      "epoch": 18.6,
      "grad_norm": 3.068850517272949,
      "learning_rate": 4.515669515669516e-05,
      "loss": 0.3074,
      "step": 1460
    },
    {
      "epoch": 18.73,
      "grad_norm": 4.442654609680176,
      "learning_rate": 4.508547008547009e-05,
      "loss": 0.3014,
      "step": 1470
    },
    {
      "epoch": 18.85,
      "grad_norm": 3.905217409133911,
      "learning_rate": 4.501424501424502e-05,
      "loss": 0.2956,
      "step": 1480
    },
    {
      "epoch": 18.98,
      "grad_norm": 4.504239559173584,
      "learning_rate": 4.4943019943019946e-05,
      "loss": 0.27,
      "step": 1490
    },
    {
      "epoch": 18.99,
      "eval_accuracy": 0.9286995515695067,
      "eval_loss": 0.18995393812656403,
      "eval_runtime": 10.258,
      "eval_samples_per_second": 434.783,
      "eval_steps_per_second": 3.412,
      "step": 1491
    },
    {
      "epoch": 19.11,
      "grad_norm": 4.248488426208496,
      "learning_rate": 4.4871794871794874e-05,
      "loss": 0.2797,
      "step": 1500
    },
    {
      "epoch": 19.24,
      "grad_norm": 3.5203890800476074,
      "learning_rate": 4.48005698005698e-05,
      "loss": 0.2737,
      "step": 1510
    },
    {
      "epoch": 19.36,
      "grad_norm": 3.1374619007110596,
      "learning_rate": 4.472934472934473e-05,
      "loss": 0.2821,
      "step": 1520
    },
    {
      "epoch": 19.49,
      "grad_norm": 3.23164439201355,
      "learning_rate": 4.465811965811966e-05,
      "loss": 0.2728,
      "step": 1530
    },
    {
      "epoch": 19.62,
      "grad_norm": 3.956149101257324,
      "learning_rate": 4.458689458689459e-05,
      "loss": 0.2831,
      "step": 1540
    },
    {
      "epoch": 19.75,
      "grad_norm": 2.9227089881896973,
      "learning_rate": 4.451566951566952e-05,
      "loss": 0.2742,
      "step": 1550
    },
    {
      "epoch": 19.87,
      "grad_norm": 4.3883490562438965,
      "learning_rate": 4.4444444444444447e-05,
      "loss": 0.2637,
      "step": 1560
    },
    {
      "epoch": 20.0,
      "grad_norm": 3.7743494510650635,
      "learning_rate": 4.4373219373219375e-05,
      "loss": 0.2796,
      "step": 1570
    },
    {
      "epoch": 20.0,
      "eval_accuracy": 0.929372197309417,
      "eval_loss": 0.18432486057281494,
      "eval_runtime": 10.4019,
      "eval_samples_per_second": 428.767,
      "eval_steps_per_second": 3.365,
      "step": 1570
    },
    {
      "epoch": 20.13,
      "grad_norm": 3.4726107120513916,
      "learning_rate": 4.4301994301994304e-05,
      "loss": 0.2705,
      "step": 1580
    },
    {
      "epoch": 20.25,
      "grad_norm": 3.691387891769409,
      "learning_rate": 4.423076923076923e-05,
      "loss": 0.2622,
      "step": 1590
    },
    {
      "epoch": 20.38,
      "grad_norm": 3.4888954162597656,
      "learning_rate": 4.415954415954416e-05,
      "loss": 0.2666,
      "step": 1600
    },
    {
      "epoch": 20.51,
      "grad_norm": 3.5790562629699707,
      "learning_rate": 4.408831908831909e-05,
      "loss": 0.26,
      "step": 1610
    },
    {
      "epoch": 20.64,
      "grad_norm": 3.888726234436035,
      "learning_rate": 4.401709401709402e-05,
      "loss": 0.2599,
      "step": 1620
    },
    {
      "epoch": 20.76,
      "grad_norm": 2.829801082611084,
      "learning_rate": 4.394586894586895e-05,
      "loss": 0.2547,
      "step": 1630
    },
    {
      "epoch": 20.89,
      "grad_norm": 4.912312030792236,
      "learning_rate": 4.3874643874643876e-05,
      "loss": 0.2649,
      "step": 1640
    },
    {
      "epoch": 20.99,
      "eval_accuracy": 0.9291479820627803,
      "eval_loss": 0.1811482161283493,
      "eval_runtime": 10.3055,
      "eval_samples_per_second": 432.78,
      "eval_steps_per_second": 3.396,
      "step": 1648
    },
    {
      "epoch": 21.02,
      "grad_norm": 2.7831737995147705,
      "learning_rate": 4.3803418803418805e-05,
      "loss": 0.2606,
      "step": 1650
    },
    {
      "epoch": 21.15,
      "grad_norm": 4.410768508911133,
      "learning_rate": 4.3732193732193733e-05,
      "loss": 0.2461,
      "step": 1660
    },
    {
      "epoch": 21.27,
      "grad_norm": 4.2035675048828125,
      "learning_rate": 4.366096866096866e-05,
      "loss": 0.2497,
      "step": 1670
    },
    {
      "epoch": 21.4,
      "grad_norm": 4.2231011390686035,
      "learning_rate": 4.358974358974359e-05,
      "loss": 0.2647,
      "step": 1680
    },
    {
      "epoch": 21.53,
      "grad_norm": 3.350215435028076,
      "learning_rate": 4.351851851851852e-05,
      "loss": 0.2486,
      "step": 1690
    },
    {
      "epoch": 21.66,
      "grad_norm": 4.015892505645752,
      "learning_rate": 4.344729344729345e-05,
      "loss": 0.2756,
      "step": 1700
    },
    {
      "epoch": 21.78,
      "grad_norm": 2.7854413986206055,
      "learning_rate": 4.337606837606838e-05,
      "loss": 0.2442,
      "step": 1710
    },
    {
      "epoch": 21.91,
      "grad_norm": 4.043717861175537,
      "learning_rate": 4.3304843304843306e-05,
      "loss": 0.2561,
      "step": 1720
    },
    {
      "epoch": 22.0,
      "eval_accuracy": 0.9345291479820628,
      "eval_loss": 0.16786783933639526,
      "eval_runtime": 10.4139,
      "eval_samples_per_second": 428.274,
      "eval_steps_per_second": 3.361,
      "step": 1727
    },
    {
      "epoch": 22.04,
      "grad_norm": 2.745814085006714,
      "learning_rate": 4.3233618233618234e-05,
      "loss": 0.2697,
      "step": 1730
    },
    {
      "epoch": 22.17,
      "grad_norm": 3.69480562210083,
      "learning_rate": 4.316239316239317e-05,
      "loss": 0.2559,
      "step": 1740
    },
    {
      "epoch": 22.29,
      "grad_norm": 3.843909502029419,
      "learning_rate": 4.309116809116809e-05,
      "loss": 0.2476,
      "step": 1750
    },
    {
      "epoch": 22.42,
      "grad_norm": 3.822366237640381,
      "learning_rate": 4.301994301994302e-05,
      "loss": 0.2268,
      "step": 1760
    },
    {
      "epoch": 22.55,
      "grad_norm": 3.71618390083313,
      "learning_rate": 4.294871794871795e-05,
      "loss": 0.2584,
      "step": 1770
    },
    {
      "epoch": 22.68,
      "grad_norm": 4.780421257019043,
      "learning_rate": 4.287749287749288e-05,
      "loss": 0.2486,
      "step": 1780
    },
    {
      "epoch": 22.8,
      "grad_norm": 4.906135082244873,
      "learning_rate": 4.2806267806267806e-05,
      "loss": 0.2454,
      "step": 1790
    },
    {
      "epoch": 22.93,
      "grad_norm": 3.733837366104126,
      "learning_rate": 4.2735042735042735e-05,
      "loss": 0.2384,
      "step": 1800
    },
    {
      "epoch": 22.99,
      "eval_accuracy": 0.940134529147982,
      "eval_loss": 0.15794554352760315,
      "eval_runtime": 10.3863,
      "eval_samples_per_second": 429.413,
      "eval_steps_per_second": 3.37,
      "step": 1805
    },
    {
      "epoch": 23.06,
      "grad_norm": 3.1447577476501465,
      "learning_rate": 4.266381766381767e-05,
      "loss": 0.24,
      "step": 1810
    },
    {
      "epoch": 23.18,
      "grad_norm": 4.22783899307251,
      "learning_rate": 4.259259259259259e-05,
      "loss": 0.2304,
      "step": 1820
    },
    {
      "epoch": 23.31,
      "grad_norm": 3.896044969558716,
      "learning_rate": 4.252136752136752e-05,
      "loss": 0.2209,
      "step": 1830
    },
    {
      "epoch": 23.44,
      "grad_norm": 3.943885326385498,
      "learning_rate": 4.2450142450142457e-05,
      "loss": 0.2286,
      "step": 1840
    },
    {
      "epoch": 23.57,
      "grad_norm": 3.7429778575897217,
      "learning_rate": 4.2378917378917385e-05,
      "loss": 0.2209,
      "step": 1850
    },
    {
      "epoch": 23.69,
      "grad_norm": 3.867816209793091,
      "learning_rate": 4.230769230769231e-05,
      "loss": 0.25,
      "step": 1860
    },
    {
      "epoch": 23.82,
      "grad_norm": 3.7919929027557373,
      "learning_rate": 4.2236467236467236e-05,
      "loss": 0.255,
      "step": 1870
    },
    {
      "epoch": 23.95,
      "grad_norm": 3.726590156555176,
      "learning_rate": 4.216524216524217e-05,
      "loss": 0.2415,
      "step": 1880
    },
    {
      "epoch": 24.0,
      "eval_accuracy": 0.9461883408071748,
      "eval_loss": 0.1414378136396408,
      "eval_runtime": 10.1308,
      "eval_samples_per_second": 440.241,
      "eval_steps_per_second": 3.455,
      "step": 1884
    },
    {
      "epoch": 24.08,
      "grad_norm": 4.867549419403076,
      "learning_rate": 4.209401709401709e-05,
      "loss": 0.2319,
      "step": 1890
    },
    {
      "epoch": 24.2,
      "grad_norm": 3.615717649459839,
      "learning_rate": 4.202279202279202e-05,
      "loss": 0.2315,
      "step": 1900
    },
    {
      "epoch": 24.33,
      "grad_norm": 3.8111448287963867,
      "learning_rate": 4.195156695156696e-05,
      "loss": 0.2395,
      "step": 1910
    },
    {
      "epoch": 24.46,
      "grad_norm": 3.761992931365967,
      "learning_rate": 4.1880341880341886e-05,
      "loss": 0.2211,
      "step": 1920
    },
    {
      "epoch": 24.59,
      "grad_norm": 4.226995468139648,
      "learning_rate": 4.180911680911681e-05,
      "loss": 0.2236,
      "step": 1930
    },
    {
      "epoch": 24.71,
      "grad_norm": 3.9813332557678223,
      "learning_rate": 4.1737891737891737e-05,
      "loss": 0.2384,
      "step": 1940
    },
    {
      "epoch": 24.84,
      "grad_norm": 3.4749677181243896,
      "learning_rate": 4.166666666666667e-05,
      "loss": 0.2181,
      "step": 1950
    },
    {
      "epoch": 24.97,
      "grad_norm": 3.7109665870666504,
      "learning_rate": 4.15954415954416e-05,
      "loss": 0.2282,
      "step": 1960
    },
    {
      "epoch": 24.99,
      "eval_accuracy": 0.9439461883408071,
      "eval_loss": 0.147759348154068,
      "eval_runtime": 10.416,
      "eval_samples_per_second": 428.189,
      "eval_steps_per_second": 3.36,
      "step": 1962
    },
    {
      "epoch": 25.1,
      "grad_norm": 5.970840930938721,
      "learning_rate": 4.152421652421652e-05,
      "loss": 0.2231,
      "step": 1970
    },
    {
      "epoch": 25.22,
      "grad_norm": 4.683918476104736,
      "learning_rate": 4.145299145299146e-05,
      "loss": 0.2314,
      "step": 1980
    },
    {
      "epoch": 25.35,
      "grad_norm": 4.046987533569336,
      "learning_rate": 4.138176638176639e-05,
      "loss": 0.2415,
      "step": 1990
    },
    {
      "epoch": 25.48,
      "grad_norm": 4.222659111022949,
      "learning_rate": 4.131054131054131e-05,
      "loss": 0.2305,
      "step": 2000
    },
    {
      "epoch": 25.61,
      "grad_norm": 3.906933069229126,
      "learning_rate": 4.123931623931624e-05,
      "loss": 0.2053,
      "step": 2010
    },
    {
      "epoch": 25.73,
      "grad_norm": 3.3867435455322266,
      "learning_rate": 4.116809116809117e-05,
      "loss": 0.2116,
      "step": 2020
    },
    {
      "epoch": 25.86,
      "grad_norm": 3.771847724914551,
      "learning_rate": 4.10968660968661e-05,
      "loss": 0.2448,
      "step": 2030
    },
    {
      "epoch": 25.99,
      "grad_norm": 2.8068020343780518,
      "learning_rate": 4.1025641025641023e-05,
      "loss": 0.2345,
      "step": 2040
    },
    {
      "epoch": 26.0,
      "eval_accuracy": 0.9419282511210763,
      "eval_loss": 0.15225176513195038,
      "eval_runtime": 11.029,
      "eval_samples_per_second": 404.39,
      "eval_steps_per_second": 3.173,
      "step": 2041
    },
    {
      "epoch": 26.11,
      "grad_norm": 4.059516906738281,
      "learning_rate": 4.095441595441596e-05,
      "loss": 0.2241,
      "step": 2050
    },
    {
      "epoch": 26.24,
      "grad_norm": 4.394323825836182,
      "learning_rate": 4.088319088319089e-05,
      "loss": 0.2269,
      "step": 2060
    },
    {
      "epoch": 26.37,
      "grad_norm": 4.457634925842285,
      "learning_rate": 4.0811965811965816e-05,
      "loss": 0.2171,
      "step": 2070
    },
    {
      "epoch": 26.5,
      "grad_norm": 4.27789306640625,
      "learning_rate": 4.074074074074074e-05,
      "loss": 0.2229,
      "step": 2080
    },
    {
      "epoch": 26.62,
      "grad_norm": 4.969176292419434,
      "learning_rate": 4.0669515669515674e-05,
      "loss": 0.2116,
      "step": 2090
    },
    {
      "epoch": 26.75,
      "grad_norm": 3.5934197902679443,
      "learning_rate": 4.05982905982906e-05,
      "loss": 0.2135,
      "step": 2100
    },
    {
      "epoch": 26.88,
      "grad_norm": 6.578746795654297,
      "learning_rate": 4.0527065527065524e-05,
      "loss": 0.2196,
      "step": 2110
    },
    {
      "epoch": 26.99,
      "eval_accuracy": 0.952914798206278,
      "eval_loss": 0.12402135878801346,
      "eval_runtime": 10.1196,
      "eval_samples_per_second": 440.729,
      "eval_steps_per_second": 3.459,
      "step": 2119
    },
    {
      "epoch": 27.01,
      "grad_norm": 4.893234729766846,
      "learning_rate": 4.045584045584046e-05,
      "loss": 0.2126,
      "step": 2120
    },
    {
      "epoch": 27.13,
      "grad_norm": 4.168965816497803,
      "learning_rate": 4.038461538461539e-05,
      "loss": 0.2142,
      "step": 2130
    },
    {
      "epoch": 27.26,
      "grad_norm": 3.5886390209198,
      "learning_rate": 4.031339031339032e-05,
      "loss": 0.2014,
      "step": 2140
    },
    {
      "epoch": 27.39,
      "grad_norm": 3.832911252975464,
      "learning_rate": 4.024216524216524e-05,
      "loss": 0.207,
      "step": 2150
    },
    {
      "epoch": 27.52,
      "grad_norm": 5.326879978179932,
      "learning_rate": 4.0170940170940174e-05,
      "loss": 0.2033,
      "step": 2160
    },
    {
      "epoch": 27.64,
      "grad_norm": 3.8248493671417236,
      "learning_rate": 4.00997150997151e-05,
      "loss": 0.2112,
      "step": 2170
    },
    {
      "epoch": 27.77,
      "grad_norm": 3.867006540298462,
      "learning_rate": 4.002849002849003e-05,
      "loss": 0.2135,
      "step": 2180
    },
    {
      "epoch": 27.9,
      "grad_norm": 3.587290048599243,
      "learning_rate": 3.995726495726496e-05,
      "loss": 0.2118,
      "step": 2190
    },
    {
      "epoch": 28.0,
      "eval_accuracy": 0.9502242152466368,
      "eval_loss": 0.12278711795806885,
      "eval_runtime": 10.6512,
      "eval_samples_per_second": 418.732,
      "eval_steps_per_second": 3.286,
      "step": 2198
    },
    {
      "epoch": 28.03,
      "grad_norm": 5.586561679840088,
      "learning_rate": 3.988603988603989e-05,
      "loss": 0.213,
      "step": 2200
    },
    {
      "epoch": 28.15,
      "grad_norm": 3.0234127044677734,
      "learning_rate": 3.981481481481482e-05,
      "loss": 0.206,
      "step": 2210
    },
    {
      "epoch": 28.28,
      "grad_norm": 4.138052463531494,
      "learning_rate": 3.974358974358974e-05,
      "loss": 0.2212,
      "step": 2220
    },
    {
      "epoch": 28.41,
      "grad_norm": 3.2922720909118652,
      "learning_rate": 3.9672364672364675e-05,
      "loss": 0.1937,
      "step": 2230
    },
    {
      "epoch": 28.54,
      "grad_norm": 3.6713385581970215,
      "learning_rate": 3.9601139601139604e-05,
      "loss": 0.2051,
      "step": 2240
    },
    {
      "epoch": 28.66,
      "grad_norm": 3.211871385574341,
      "learning_rate": 3.952991452991453e-05,
      "loss": 0.2117,
      "step": 2250
    },
    {
      "epoch": 28.79,
      "grad_norm": 3.307561159133911,
      "learning_rate": 3.945868945868946e-05,
      "loss": 0.2161,
      "step": 2260
    },
    {
      "epoch": 28.92,
      "grad_norm": 3.385369300842285,
      "learning_rate": 3.938746438746439e-05,
      "loss": 0.1977,
      "step": 2270
    },
    {
      "epoch": 28.99,
      "eval_accuracy": 0.9565022421524664,
      "eval_loss": 0.11919865012168884,
      "eval_runtime": 10.3622,
      "eval_samples_per_second": 430.411,
      "eval_steps_per_second": 3.378,
      "step": 2276
    },
    {
      "epoch": 29.04,
      "grad_norm": 5.586241245269775,
      "learning_rate": 3.931623931623932e-05,
      "loss": 0.21,
      "step": 2280
    },
    {
      "epoch": 29.17,
      "grad_norm": 3.34932017326355,
      "learning_rate": 3.924501424501425e-05,
      "loss": 0.2062,
      "step": 2290
    },
    {
      "epoch": 29.3,
      "grad_norm": 3.922372579574585,
      "learning_rate": 3.9173789173789176e-05,
      "loss": 0.2166,
      "step": 2300
    },
    {
      "epoch": 29.43,
      "grad_norm": 3.6068267822265625,
      "learning_rate": 3.9102564102564105e-05,
      "loss": 0.2046,
      "step": 2310
    },
    {
      "epoch": 29.55,
      "grad_norm": 4.987060070037842,
      "learning_rate": 3.903133903133903e-05,
      "loss": 0.1985,
      "step": 2320
    },
    {
      "epoch": 29.68,
      "grad_norm": 3.772573232650757,
      "learning_rate": 3.896011396011396e-05,
      "loss": 0.2037,
      "step": 2330
    },
    {
      "epoch": 29.81,
      "grad_norm": 4.787131309509277,
      "learning_rate": 3.888888888888889e-05,
      "loss": 0.2023,
      "step": 2340
    },
    {
      "epoch": 29.94,
      "grad_norm": 3.7376861572265625,
      "learning_rate": 3.881766381766382e-05,
      "loss": 0.1994,
      "step": 2350
    },
    {
      "epoch": 30.0,
      "eval_accuracy": 0.952914798206278,
      "eval_loss": 0.12083474546670914,
      "eval_runtime": 10.1108,
      "eval_samples_per_second": 441.111,
      "eval_steps_per_second": 3.462,
      "step": 2355
    },
    {
      "epoch": 30.06,
      "grad_norm": 2.6949622631073,
      "learning_rate": 3.874643874643875e-05,
      "loss": 0.199,
      "step": 2360
    },
    {
      "epoch": 30.19,
      "grad_norm": 4.504613876342773,
      "learning_rate": 3.867521367521368e-05,
      "loss": 0.1856,
      "step": 2370
    },
    {
      "epoch": 30.32,
      "grad_norm": 3.8520376682281494,
      "learning_rate": 3.8603988603988605e-05,
      "loss": 0.1982,
      "step": 2380
    },
    {
      "epoch": 30.45,
      "grad_norm": 4.474611282348633,
      "learning_rate": 3.8532763532763534e-05,
      "loss": 0.199,
      "step": 2390
    },
    {
      "epoch": 30.57,
      "grad_norm": 3.9707252979278564,
      "learning_rate": 3.846153846153846e-05,
      "loss": 0.1993,
      "step": 2400
    },
    {
      "epoch": 30.7,
      "grad_norm": 4.545299530029297,
      "learning_rate": 3.839031339031339e-05,
      "loss": 0.2002,
      "step": 2410
    },
    {
      "epoch": 30.83,
      "grad_norm": 4.200654029846191,
      "learning_rate": 3.831908831908832e-05,
      "loss": 0.1955,
      "step": 2420
    },
    {
      "epoch": 30.96,
      "grad_norm": 3.393235206604004,
      "learning_rate": 3.824786324786325e-05,
      "loss": 0.1959,
      "step": 2430
    },
    {
      "epoch": 30.99,
      "eval_accuracy": 0.9562780269058296,
      "eval_loss": 0.11892925947904587,
      "eval_runtime": 10.326,
      "eval_samples_per_second": 431.92,
      "eval_steps_per_second": 3.39,
      "step": 2433
    },
    {
      "epoch": 31.08,
      "grad_norm": 5.270153045654297,
      "learning_rate": 3.817663817663818e-05,
      "loss": 0.2021,
      "step": 2440
    },
    {
      "epoch": 31.21,
      "grad_norm": 2.6245462894439697,
      "learning_rate": 3.8105413105413106e-05,
      "loss": 0.1928,
      "step": 2450
    },
    {
      "epoch": 31.34,
      "grad_norm": 3.6266441345214844,
      "learning_rate": 3.8034188034188035e-05,
      "loss": 0.2017,
      "step": 2460
    },
    {
      "epoch": 31.46,
      "grad_norm": 4.613752365112305,
      "learning_rate": 3.7962962962962964e-05,
      "loss": 0.1922,
      "step": 2470
    },
    {
      "epoch": 31.59,
      "grad_norm": 3.5917015075683594,
      "learning_rate": 3.789173789173789e-05,
      "loss": 0.1888,
      "step": 2480
    },
    {
      "epoch": 31.72,
      "grad_norm": 4.044875621795654,
      "learning_rate": 3.782051282051282e-05,
      "loss": 0.195,
      "step": 2490
    },
    {
      "epoch": 31.85,
      "grad_norm": 3.4889109134674072,
      "learning_rate": 3.774928774928775e-05,
      "loss": 0.1872,
      "step": 2500
    },
    {
      "epoch": 31.97,
      "grad_norm": 3.380016565322876,
      "learning_rate": 3.767806267806268e-05,
      "loss": 0.1946,
      "step": 2510
    },
    {
      "epoch": 32.0,
      "eval_accuracy": 0.9616591928251121,
      "eval_loss": 0.11063708364963531,
      "eval_runtime": 10.3813,
      "eval_samples_per_second": 429.619,
      "eval_steps_per_second": 3.371,
      "step": 2512
    },
    {
      "epoch": 32.1,
      "grad_norm": 2.8205275535583496,
      "learning_rate": 3.760683760683761e-05,
      "loss": 0.181,
      "step": 2520
    },
    {
      "epoch": 32.23,
      "grad_norm": 3.5672075748443604,
      "learning_rate": 3.7535612535612536e-05,
      "loss": 0.1799,
      "step": 2530
    },
    {
      "epoch": 32.36,
      "grad_norm": 2.782327651977539,
      "learning_rate": 3.746438746438747e-05,
      "loss": 0.1867,
      "step": 2540
    },
    {
      "epoch": 32.48,
      "grad_norm": 4.034256935119629,
      "learning_rate": 3.739316239316239e-05,
      "loss": 0.1787,
      "step": 2550
    },
    {
      "epoch": 32.61,
      "grad_norm": 4.922691345214844,
      "learning_rate": 3.732193732193732e-05,
      "loss": 0.1917,
      "step": 2560
    },
    {
      "epoch": 32.74,
      "grad_norm": 3.8344011306762695,
      "learning_rate": 3.725071225071226e-05,
      "loss": 0.1926,
      "step": 2570
    },
    {
      "epoch": 32.87,
      "grad_norm": 3.1199018955230713,
      "learning_rate": 3.717948717948718e-05,
      "loss": 0.1879,
      "step": 2580
    },
    {
      "epoch": 32.99,
      "grad_norm": 4.506677150726318,
      "learning_rate": 3.710826210826211e-05,
      "loss": 0.1777,
      "step": 2590
    },
    {
      "epoch": 32.99,
      "eval_accuracy": 0.9587443946188341,
      "eval_loss": 0.10475672781467438,
      "eval_runtime": 10.5123,
      "eval_samples_per_second": 424.265,
      "eval_steps_per_second": 3.329,
      "step": 2590
    },
    {
      "epoch": 33.12,
      "grad_norm": 3.60703444480896,
      "learning_rate": 3.7037037037037037e-05,
      "loss": 0.1859,
      "step": 2600
    },
    {
      "epoch": 33.25,
      "grad_norm": 2.695091485977173,
      "learning_rate": 3.696581196581197e-05,
      "loss": 0.1898,
      "step": 2610
    },
    {
      "epoch": 33.38,
      "grad_norm": 5.429329872131348,
      "learning_rate": 3.6894586894586894e-05,
      "loss": 0.203,
      "step": 2620
    },
    {
      "epoch": 33.5,
      "grad_norm": 4.286501407623291,
      "learning_rate": 3.682336182336182e-05,
      "loss": 0.189,
      "step": 2630
    },
    {
      "epoch": 33.63,
      "grad_norm": 3.461913585662842,
      "learning_rate": 3.675213675213676e-05,
      "loss": 0.1844,
      "step": 2640
    },
    {
      "epoch": 33.76,
      "grad_norm": 3.4661142826080322,
      "learning_rate": 3.668091168091169e-05,
      "loss": 0.1765,
      "step": 2650
    },
    {
      "epoch": 33.89,
      "grad_norm": 3.5034396648406982,
      "learning_rate": 3.660968660968661e-05,
      "loss": 0.1895,
      "step": 2660
    },
    {
      "epoch": 34.0,
      "eval_accuracy": 0.9643497757847533,
      "eval_loss": 0.09648270159959793,
      "eval_runtime": 10.4535,
      "eval_samples_per_second": 426.651,
      "eval_steps_per_second": 3.348,
      "step": 2669
    },
    {
      "epoch": 34.01,
      "grad_norm": 3.3810720443725586,
      "learning_rate": 3.653846153846154e-05,
      "loss": 0.1745,
      "step": 2670
    },
    {
      "epoch": 34.14,
      "grad_norm": 3.2430992126464844,
      "learning_rate": 3.646723646723647e-05,
      "loss": 0.1728,
      "step": 2680
    },
    {
      "epoch": 34.27,
      "grad_norm": 3.9114370346069336,
      "learning_rate": 3.6396011396011395e-05,
      "loss": 0.1795,
      "step": 2690
    },
    {
      "epoch": 34.39,
      "grad_norm": 5.329428195953369,
      "learning_rate": 3.6324786324786323e-05,
      "loss": 0.1911,
      "step": 2700
    },
    {
      "epoch": 34.52,
      "grad_norm": 3.0880038738250732,
      "learning_rate": 3.625356125356126e-05,
      "loss": 0.1864,
      "step": 2710
    },
    {
      "epoch": 34.65,
      "grad_norm": 3.305945634841919,
      "learning_rate": 3.618233618233619e-05,
      "loss": 0.1756,
      "step": 2720
    },
    {
      "epoch": 34.78,
      "grad_norm": 3.8540592193603516,
      "learning_rate": 3.611111111111111e-05,
      "loss": 0.1808,
      "step": 2730
    },
    {
      "epoch": 34.9,
      "grad_norm": 3.3947036266326904,
      "learning_rate": 3.603988603988604e-05,
      "loss": 0.1779,
      "step": 2740
    },
    {
      "epoch": 34.99,
      "eval_accuracy": 0.9623318385650225,
      "eval_loss": 0.10805962979793549,
      "eval_runtime": 10.2969,
      "eval_samples_per_second": 433.139,
      "eval_steps_per_second": 3.399,
      "step": 2747
    },
    {
      "epoch": 35.03,
      "grad_norm": 3.559788942337036,
      "learning_rate": 3.5968660968660974e-05,
      "loss": 0.1743,
      "step": 2750
    },
    {
      "epoch": 35.16,
      "grad_norm": 4.594892978668213,
      "learning_rate": 3.58974358974359e-05,
      "loss": 0.175,
      "step": 2760
    },
    {
      "epoch": 35.29,
      "grad_norm": 5.519185543060303,
      "learning_rate": 3.5826210826210824e-05,
      "loss": 0.1728,
      "step": 2770
    },
    {
      "epoch": 35.41,
      "grad_norm": 3.953115463256836,
      "learning_rate": 3.575498575498576e-05,
      "loss": 0.1758,
      "step": 2780
    },
    {
      "epoch": 35.54,
      "grad_norm": 4.416403770446777,
      "learning_rate": 3.568376068376069e-05,
      "loss": 0.175,
      "step": 2790
    },
    {
      "epoch": 35.67,
      "grad_norm": 3.546534776687622,
      "learning_rate": 3.561253561253561e-05,
      "loss": 0.186,
      "step": 2800
    },
    {
      "epoch": 35.8,
      "grad_norm": 2.469801664352417,
      "learning_rate": 3.554131054131054e-05,
      "loss": 0.1721,
      "step": 2810
    },
    {
      "epoch": 35.92,
      "grad_norm": 2.8391408920288086,
      "learning_rate": 3.5470085470085474e-05,
      "loss": 0.1717,
      "step": 2820
    },
    {
      "epoch": 36.0,
      "eval_accuracy": 0.9589686098654708,
      "eval_loss": 0.11545290797948837,
      "eval_runtime": 10.546,
      "eval_samples_per_second": 422.908,
      "eval_steps_per_second": 3.319,
      "step": 2826
    },
    {
      "epoch": 36.05,
      "grad_norm": 2.674395799636841,
      "learning_rate": 3.53988603988604e-05,
      "loss": 0.1903,
      "step": 2830
    },
    {
      "epoch": 36.18,
      "grad_norm": 3.417855739593506,
      "learning_rate": 3.5327635327635325e-05,
      "loss": 0.179,
      "step": 2840
    },
    {
      "epoch": 36.31,
      "grad_norm": 3.361017942428589,
      "learning_rate": 3.525641025641026e-05,
      "loss": 0.1597,
      "step": 2850
    },
    {
      "epoch": 36.43,
      "grad_norm": 3.462763786315918,
      "learning_rate": 3.518518518518519e-05,
      "loss": 0.1754,
      "step": 2860
    },
    {
      "epoch": 36.56,
      "grad_norm": 4.020880699157715,
      "learning_rate": 3.511396011396012e-05,
      "loss": 0.1784,
      "step": 2870
    },
    {
      "epoch": 36.69,
      "grad_norm": 4.944223403930664,
      "learning_rate": 3.504273504273504e-05,
      "loss": 0.168,
      "step": 2880
    },
    {
      "epoch": 36.82,
      "grad_norm": 3.0020694732666016,
      "learning_rate": 3.4971509971509975e-05,
      "loss": 0.1765,
      "step": 2890
    },
    {
      "epoch": 36.94,
      "grad_norm": 3.5330681800842285,
      "learning_rate": 3.4900284900284904e-05,
      "loss": 0.1715,
      "step": 2900
    },
    {
      "epoch": 36.99,
      "eval_accuracy": 0.9630044843049327,
      "eval_loss": 0.1006338968873024,
      "eval_runtime": 10.331,
      "eval_samples_per_second": 431.709,
      "eval_steps_per_second": 3.388,
      "step": 2904
    },
    {
      "epoch": 37.07,
      "grad_norm": 3.5816547870635986,
      "learning_rate": 3.4829059829059826e-05,
      "loss": 0.163,
      "step": 2910
    },
    {
      "epoch": 37.2,
      "grad_norm": 3.187361240386963,
      "learning_rate": 3.475783475783476e-05,
      "loss": 0.1637,
      "step": 2920
    },
    {
      "epoch": 37.32,
      "grad_norm": 4.5370612144470215,
      "learning_rate": 3.468660968660969e-05,
      "loss": 0.1698,
      "step": 2930
    },
    {
      "epoch": 37.45,
      "grad_norm": 4.927834510803223,
      "learning_rate": 3.461538461538462e-05,
      "loss": 0.1664,
      "step": 2940
    },
    {
      "epoch": 37.58,
      "grad_norm": 4.095933437347412,
      "learning_rate": 3.454415954415954e-05,
      "loss": 0.1554,
      "step": 2950
    },
    {
      "epoch": 37.71,
      "grad_norm": 3.43340802192688,
      "learning_rate": 3.4472934472934476e-05,
      "loss": 0.1735,
      "step": 2960
    },
    {
      "epoch": 37.83,
      "grad_norm": 4.661231994628906,
      "learning_rate": 3.4401709401709405e-05,
      "loss": 0.1866,
      "step": 2970
    },
    {
      "epoch": 37.96,
      "grad_norm": 3.5270962715148926,
      "learning_rate": 3.433048433048433e-05,
      "loss": 0.1748,
      "step": 2980
    },
    {
      "epoch": 38.0,
      "eval_accuracy": 0.9695067264573991,
      "eval_loss": 0.0894242599606514,
      "eval_runtime": 10.4549,
      "eval_samples_per_second": 426.594,
      "eval_steps_per_second": 3.348,
      "step": 2983
    },
    {
      "epoch": 38.09,
      "grad_norm": 3.2506449222564697,
      "learning_rate": 3.425925925925926e-05,
      "loss": 0.1792,
      "step": 2990
    },
    {
      "epoch": 38.22,
      "grad_norm": 3.383273124694824,
      "learning_rate": 3.418803418803419e-05,
      "loss": 0.1672,
      "step": 3000
    },
    {
      "epoch": 38.34,
      "grad_norm": 4.260499000549316,
      "learning_rate": 3.411680911680912e-05,
      "loss": 0.1728,
      "step": 3010
    },
    {
      "epoch": 38.47,
      "grad_norm": 3.8010692596435547,
      "learning_rate": 3.404558404558404e-05,
      "loss": 0.1649,
      "step": 3020
    },
    {
      "epoch": 38.6,
      "grad_norm": 3.387493371963501,
      "learning_rate": 3.397435897435898e-05,
      "loss": 0.1653,
      "step": 3030
    },
    {
      "epoch": 38.73,
      "grad_norm": 2.461514711380005,
      "learning_rate": 3.3903133903133905e-05,
      "loss": 0.1598,
      "step": 3040
    },
    {
      "epoch": 38.85,
      "grad_norm": 2.754783868789673,
      "learning_rate": 3.3831908831908834e-05,
      "loss": 0.1613,
      "step": 3050
    },
    {
      "epoch": 38.98,
      "grad_norm": 3.63181209564209,
      "learning_rate": 3.376068376068376e-05,
      "loss": 0.1677,
      "step": 3060
    },
    {
      "epoch": 38.99,
      "eval_accuracy": 0.9639013452914799,
      "eval_loss": 0.10370239615440369,
      "eval_runtime": 10.8803,
      "eval_samples_per_second": 409.915,
      "eval_steps_per_second": 3.217,
      "step": 3061
    },
    {
      "epoch": 39.11,
      "grad_norm": 3.2453739643096924,
      "learning_rate": 3.368945868945869e-05,
      "loss": 0.1749,
      "step": 3070
    },
    {
      "epoch": 39.24,
      "grad_norm": 3.9649202823638916,
      "learning_rate": 3.361823361823362e-05,
      "loss": 0.1663,
      "step": 3080
    },
    {
      "epoch": 39.36,
      "grad_norm": 4.201056957244873,
      "learning_rate": 3.354700854700855e-05,
      "loss": 0.1669,
      "step": 3090
    },
    {
      "epoch": 39.49,
      "grad_norm": 4.199280738830566,
      "learning_rate": 3.347578347578348e-05,
      "loss": 0.1699,
      "step": 3100
    },
    {
      "epoch": 39.62,
      "grad_norm": 2.895860433578491,
      "learning_rate": 3.3404558404558406e-05,
      "loss": 0.1686,
      "step": 3110
    },
    {
      "epoch": 39.75,
      "grad_norm": 2.9467713832855225,
      "learning_rate": 3.3333333333333335e-05,
      "loss": 0.1662,
      "step": 3120
    },
    {
      "epoch": 39.87,
      "grad_norm": 3.6263599395751953,
      "learning_rate": 3.3262108262108264e-05,
      "loss": 0.1587,
      "step": 3130
    },
    {
      "epoch": 40.0,
      "grad_norm": 3.581874370574951,
      "learning_rate": 3.319088319088319e-05,
      "loss": 0.1723,
      "step": 3140
    },
    {
      "epoch": 40.0,
      "eval_accuracy": 0.963677130044843,
      "eval_loss": 0.10090423375368118,
      "eval_runtime": 10.487,
      "eval_samples_per_second": 425.289,
      "eval_steps_per_second": 3.337,
      "step": 3140
    },
    {
      "epoch": 40.13,
      "grad_norm": 3.810126543045044,
      "learning_rate": 3.311965811965812e-05,
      "loss": 0.1535,
      "step": 3150
    },
    {
      "epoch": 40.25,
      "grad_norm": 3.254972219467163,
      "learning_rate": 3.304843304843305e-05,
      "loss": 0.165,
      "step": 3160
    },
    {
      "epoch": 40.38,
      "grad_norm": 3.7977054119110107,
      "learning_rate": 3.297720797720798e-05,
      "loss": 0.161,
      "step": 3170
    },
    {
      "epoch": 40.51,
      "grad_norm": 2.7377121448516846,
      "learning_rate": 3.290598290598291e-05,
      "loss": 0.1622,
      "step": 3180
    },
    {
      "epoch": 40.64,
      "grad_norm": 3.7134432792663574,
      "learning_rate": 3.2834757834757836e-05,
      "loss": 0.1672,
      "step": 3190
    },
    {
      "epoch": 40.76,
      "grad_norm": 4.389490127563477,
      "learning_rate": 3.2763532763532764e-05,
      "loss": 0.1653,
      "step": 3200
    },
    {
      "epoch": 40.89,
      "grad_norm": 2.8473117351531982,
      "learning_rate": 3.269230769230769e-05,
      "loss": 0.1552,
      "step": 3210
    },
    {
      "epoch": 40.99,
      "eval_accuracy": 0.9697309417040358,
      "eval_loss": 0.08673026412725449,
      "eval_runtime": 10.5751,
      "eval_samples_per_second": 421.744,
      "eval_steps_per_second": 3.31,
      "step": 3218
    },
    {
      "epoch": 41.02,
      "grad_norm": 3.270151138305664,
      "learning_rate": 3.262108262108262e-05,
      "loss": 0.1578,
      "step": 3220
    },
    {
      "epoch": 41.15,
      "grad_norm": 2.5095419883728027,
      "learning_rate": 3.254985754985755e-05,
      "loss": 0.1576,
      "step": 3230
    },
    {
      "epoch": 41.27,
      "grad_norm": 4.268132209777832,
      "learning_rate": 3.247863247863248e-05,
      "loss": 0.1619,
      "step": 3240
    },
    {
      "epoch": 41.4,
      "grad_norm": 3.5522594451904297,
      "learning_rate": 3.240740740740741e-05,
      "loss": 0.1545,
      "step": 3250
    },
    {
      "epoch": 41.53,
      "grad_norm": 4.531596660614014,
      "learning_rate": 3.2336182336182337e-05,
      "loss": 0.1627,
      "step": 3260
    },
    {
      "epoch": 41.66,
      "grad_norm": 2.89469575881958,
      "learning_rate": 3.2264957264957265e-05,
      "loss": 0.1576,
      "step": 3270
    },
    {
      "epoch": 41.78,
      "grad_norm": 3.506288766860962,
      "learning_rate": 3.2193732193732194e-05,
      "loss": 0.1675,
      "step": 3280
    },
    {
      "epoch": 41.91,
      "grad_norm": 2.518845796585083,
      "learning_rate": 3.212250712250712e-05,
      "loss": 0.1658,
      "step": 3290
    },
    {
      "epoch": 42.0,
      "eval_accuracy": 0.9701793721973094,
      "eval_loss": 0.0859610065817833,
      "eval_runtime": 10.0539,
      "eval_samples_per_second": 443.611,
      "eval_steps_per_second": 3.481,
      "step": 3297
    },
    {
      "epoch": 42.04,
      "grad_norm": 3.802457332611084,
      "learning_rate": 3.205128205128206e-05,
      "loss": 0.1662,
      "step": 3300
    },
    {
      "epoch": 42.17,
      "grad_norm": 3.1138010025024414,
      "learning_rate": 3.198005698005698e-05,
      "loss": 0.1531,
      "step": 3310
    },
    {
      "epoch": 42.29,
      "grad_norm": 6.375518321990967,
      "learning_rate": 3.190883190883191e-05,
      "loss": 0.1617,
      "step": 3320
    },
    {
      "epoch": 42.42,
      "grad_norm": 5.619466304779053,
      "learning_rate": 3.183760683760684e-05,
      "loss": 0.1585,
      "step": 3330
    },
    {
      "epoch": 42.55,
      "grad_norm": 2.600783586502075,
      "learning_rate": 3.176638176638177e-05,
      "loss": 0.1607,
      "step": 3340
    },
    {
      "epoch": 42.68,
      "grad_norm": 4.039291858673096,
      "learning_rate": 3.1695156695156695e-05,
      "loss": 0.1515,
      "step": 3350
    },
    {
      "epoch": 42.8,
      "grad_norm": 4.614203929901123,
      "learning_rate": 3.162393162393162e-05,
      "loss": 0.1368,
      "step": 3360
    },
    {
      "epoch": 42.93,
      "grad_norm": 2.795916795730591,
      "learning_rate": 3.155270655270656e-05,
"loss": 0.1531, |
|
"step": 3370 |
|
}, |
|
{ |
|
"epoch": 42.99, |
|
"eval_accuracy": 0.9672645739910314, |
|
"eval_loss": 0.09214626997709274, |
|
"eval_runtime": 10.26, |
|
"eval_samples_per_second": 434.7, |
|
"eval_steps_per_second": 3.411, |
|
"step": 3375 |
|
}, |
|
{ |
|
"epoch": 43.06, |
|
"grad_norm": 3.106384038925171, |
|
"learning_rate": 3.148148148148148e-05, |
|
"loss": 0.1597, |
|
"step": 3380 |
|
}, |
|
{ |
|
"epoch": 43.18, |
|
"grad_norm": 3.675632953643799, |
|
"learning_rate": 3.141025641025641e-05, |
|
"loss": 0.1531, |
|
"step": 3390 |
|
}, |
|
{ |
|
"epoch": 43.31, |
|
"grad_norm": 3.027937173843384, |
|
"learning_rate": 3.133903133903134e-05, |
|
"loss": 0.161, |
|
"step": 3400 |
|
}, |
|
{ |
|
"epoch": 43.44, |
|
"grad_norm": 3.678048849105835, |
|
"learning_rate": 3.1267806267806274e-05, |
|
"loss": 0.162, |
|
"step": 3410 |
|
}, |
|
{ |
|
"epoch": 43.57, |
|
"grad_norm": 3.193264961242676, |
|
"learning_rate": 3.1196581196581195e-05, |
|
"loss": 0.1574, |
|
"step": 3420 |
|
}, |
|
{ |
|
"epoch": 43.69, |
|
"grad_norm": 4.626323699951172, |
|
"learning_rate": 3.1125356125356124e-05, |
|
"loss": 0.1633, |
|
"step": 3430 |
|
}, |
|
{ |
|
"epoch": 43.82, |
|
"grad_norm": 4.2914018630981445, |
|
"learning_rate": 3.105413105413106e-05, |
|
"loss": 0.1563, |
|
"step": 3440 |
|
}, |
|
{ |
|
"epoch": 43.95, |
|
"grad_norm": 3.208162784576416, |
|
"learning_rate": 3.098290598290599e-05, |
|
"loss": 0.1593, |
|
"step": 3450 |
|
}, |
|
{ |
|
"epoch": 44.0, |
|
"eval_accuracy": 0.9645739910313902, |
|
"eval_loss": 0.09411517530679703, |
|
"eval_runtime": 10.3819, |
|
"eval_samples_per_second": 429.592, |
|
"eval_steps_per_second": 3.371, |
|
"step": 3454 |
|
}, |
|
{ |
|
"epoch": 44.08, |
|
"grad_norm": 4.032459735870361, |
|
"learning_rate": 3.091168091168091e-05, |
|
"loss": 0.1518, |
|
"step": 3460 |
|
}, |
|
{ |
|
"epoch": 44.2, |
|
"grad_norm": 3.239495038986206, |
|
"learning_rate": 3.084045584045584e-05, |
|
"loss": 0.1519, |
|
"step": 3470 |
|
}, |
|
{ |
|
"epoch": 44.33, |
|
"grad_norm": 2.621792793273926, |
|
"learning_rate": 3.0769230769230774e-05, |
|
"loss": 0.1385, |
|
"step": 3480 |
|
}, |
|
{ |
|
"epoch": 44.46, |
|
"grad_norm": 4.725817680358887, |
|
"learning_rate": 3.0698005698005696e-05, |
|
"loss": 0.142, |
|
"step": 3490 |
|
}, |
|
{ |
|
"epoch": 44.59, |
|
"grad_norm": 4.686051368713379, |
|
"learning_rate": 3.0626780626780625e-05, |
|
"loss": 0.1542, |
|
"step": 3500 |
|
}, |
|
{ |
|
"epoch": 44.71, |
|
"grad_norm": 4.348143577575684, |
|
"learning_rate": 3.055555555555556e-05, |
|
"loss": 0.1529, |
|
"step": 3510 |
|
}, |
|
{ |
|
"epoch": 44.84, |
|
"grad_norm": 2.644388437271118, |
|
"learning_rate": 3.0484330484330486e-05, |
|
"loss": 0.1524, |
|
"step": 3520 |
|
}, |
|
{ |
|
"epoch": 44.97, |
|
"grad_norm": 3.8867974281311035, |
|
"learning_rate": 3.0413105413105414e-05, |
|
"loss": 0.1568, |
|
"step": 3530 |
|
}, |
|
{ |
|
"epoch": 44.99, |
|
"eval_accuracy": 0.968609865470852, |
|
"eval_loss": 0.08786384016275406, |
|
"eval_runtime": 10.3766, |
|
"eval_samples_per_second": 429.814, |
|
"eval_steps_per_second": 3.373, |
|
"step": 3532 |
|
}, |
|
{ |
|
"epoch": 45.1, |
|
"grad_norm": 3.1211063861846924, |
|
"learning_rate": 3.034188034188034e-05, |
|
"loss": 0.1518, |
|
"step": 3540 |
|
}, |
|
{ |
|
"epoch": 45.22, |
|
"grad_norm": 4.649716854095459, |
|
"learning_rate": 3.0270655270655275e-05, |
|
"loss": 0.1519, |
|
"step": 3550 |
|
}, |
|
{ |
|
"epoch": 45.35, |
|
"grad_norm": 2.5936279296875, |
|
"learning_rate": 3.01994301994302e-05, |
|
"loss": 0.1462, |
|
"step": 3560 |
|
}, |
|
{ |
|
"epoch": 45.48, |
|
"grad_norm": 3.2736833095550537, |
|
"learning_rate": 3.012820512820513e-05, |
|
"loss": 0.1516, |
|
"step": 3570 |
|
}, |
|
{ |
|
"epoch": 45.61, |
|
"grad_norm": 3.4682116508483887, |
|
"learning_rate": 3.005698005698006e-05, |
|
"loss": 0.1482, |
|
"step": 3580 |
|
}, |
|
{ |
|
"epoch": 45.73, |
|
"grad_norm": 4.235909938812256, |
|
"learning_rate": 2.9985754985754986e-05, |
|
"loss": 0.147, |
|
"step": 3590 |
|
}, |
|
{ |
|
"epoch": 45.86, |
|
"grad_norm": 2.4908273220062256, |
|
"learning_rate": 2.9914529914529915e-05, |
|
"loss": 0.1431, |
|
"step": 3600 |
|
}, |
|
{ |
|
"epoch": 45.99, |
|
"grad_norm": 4.006827354431152, |
|
"learning_rate": 2.9843304843304844e-05, |
|
"loss": 0.1621, |
|
"step": 3610 |
|
}, |
|
{ |
|
"epoch": 46.0, |
|
"eval_accuracy": 0.9692825112107624, |
|
"eval_loss": 0.09294471889734268, |
|
"eval_runtime": 10.4845, |
|
"eval_samples_per_second": 425.39, |
|
"eval_steps_per_second": 3.338, |
|
"step": 3611 |
|
}, |
|
{ |
|
"epoch": 46.11, |
|
"grad_norm": 3.718818187713623, |
|
"learning_rate": 2.9772079772079776e-05, |
|
"loss": 0.1594, |
|
"step": 3620 |
|
}, |
|
{ |
|
"epoch": 46.24, |
|
"grad_norm": 3.664454936981201, |
|
"learning_rate": 2.97008547008547e-05, |
|
"loss": 0.1466, |
|
"step": 3630 |
|
}, |
|
{ |
|
"epoch": 46.37, |
|
"grad_norm": 3.0173661708831787, |
|
"learning_rate": 2.962962962962963e-05, |
|
"loss": 0.1544, |
|
"step": 3640 |
|
}, |
|
{ |
|
"epoch": 46.5, |
|
"grad_norm": 3.583787679672241, |
|
"learning_rate": 2.9558404558404562e-05, |
|
"loss": 0.1559, |
|
"step": 3650 |
|
}, |
|
{ |
|
"epoch": 46.62, |
|
"grad_norm": 3.0325767993927, |
|
"learning_rate": 2.948717948717949e-05, |
|
"loss": 0.1347, |
|
"step": 3660 |
|
}, |
|
{ |
|
"epoch": 46.75, |
|
"grad_norm": 3.1503076553344727, |
|
"learning_rate": 2.9415954415954416e-05, |
|
"loss": 0.1531, |
|
"step": 3670 |
|
}, |
|
{ |
|
"epoch": 46.88, |
|
"grad_norm": 3.430950164794922, |
|
"learning_rate": 2.9344729344729345e-05, |
|
"loss": 0.1602, |
|
"step": 3680 |
|
}, |
|
{ |
|
"epoch": 46.99, |
|
"eval_accuracy": 0.974439461883408, |
|
"eval_loss": 0.0777370035648346, |
|
"eval_runtime": 10.1977, |
|
"eval_samples_per_second": 437.355, |
|
"eval_steps_per_second": 3.432, |
|
"step": 3689 |
|
}, |
|
{ |
|
"epoch": 47.01, |
|
"grad_norm": 3.286393165588379, |
|
"learning_rate": 2.9273504273504277e-05, |
|
"loss": 0.1461, |
|
"step": 3690 |
|
}, |
|
{ |
|
"epoch": 47.13, |
|
"grad_norm": 4.135302543640137, |
|
"learning_rate": 2.9202279202279202e-05, |
|
"loss": 0.1468, |
|
"step": 3700 |
|
}, |
|
{ |
|
"epoch": 47.26, |
|
"grad_norm": 3.4055960178375244, |
|
"learning_rate": 2.913105413105413e-05, |
|
"loss": 0.1402, |
|
"step": 3710 |
|
}, |
|
{ |
|
"epoch": 47.39, |
|
"grad_norm": 3.821648359298706, |
|
"learning_rate": 2.9059829059829063e-05, |
|
"loss": 0.1449, |
|
"step": 3720 |
|
}, |
|
{ |
|
"epoch": 47.52, |
|
"grad_norm": 3.148038148880005, |
|
"learning_rate": 2.898860398860399e-05, |
|
"loss": 0.1386, |
|
"step": 3730 |
|
}, |
|
{ |
|
"epoch": 47.64, |
|
"grad_norm": 2.2282421588897705, |
|
"learning_rate": 2.8917378917378917e-05, |
|
"loss": 0.1423, |
|
"step": 3740 |
|
}, |
|
{ |
|
"epoch": 47.77, |
|
"grad_norm": 3.5172359943389893, |
|
"learning_rate": 2.8846153846153845e-05, |
|
"loss": 0.1531, |
|
"step": 3750 |
|
}, |
|
{ |
|
"epoch": 47.9, |
|
"grad_norm": 3.5688581466674805, |
|
"learning_rate": 2.8774928774928778e-05, |
|
"loss": 0.1397, |
|
"step": 3760 |
|
}, |
|
{ |
|
"epoch": 48.0, |
|
"eval_accuracy": 0.9733183856502242, |
|
"eval_loss": 0.08208809792995453, |
|
"eval_runtime": 10.4302, |
|
"eval_samples_per_second": 427.603, |
|
"eval_steps_per_second": 3.356, |
|
"step": 3768 |
|
}, |
|
{ |
|
"epoch": 48.03, |
|
"grad_norm": 2.843202829360962, |
|
"learning_rate": 2.8703703703703706e-05, |
|
"loss": 0.1411, |
|
"step": 3770 |
|
}, |
|
{ |
|
"epoch": 48.15, |
|
"grad_norm": 3.353113889694214, |
|
"learning_rate": 2.863247863247863e-05, |
|
"loss": 0.145, |
|
"step": 3780 |
|
}, |
|
{ |
|
"epoch": 48.28, |
|
"grad_norm": 3.2403688430786133, |
|
"learning_rate": 2.8561253561253564e-05, |
|
"loss": 0.1338, |
|
"step": 3790 |
|
}, |
|
{ |
|
"epoch": 48.41, |
|
"grad_norm": 2.347370147705078, |
|
"learning_rate": 2.8490028490028492e-05, |
|
"loss": 0.1355, |
|
"step": 3800 |
|
}, |
|
{ |
|
"epoch": 48.54, |
|
"grad_norm": 3.2337448596954346, |
|
"learning_rate": 2.8418803418803418e-05, |
|
"loss": 0.1424, |
|
"step": 3810 |
|
}, |
|
{ |
|
"epoch": 48.66, |
|
"grad_norm": 4.670347213745117, |
|
"learning_rate": 2.8347578347578346e-05, |
|
"loss": 0.1485, |
|
"step": 3820 |
|
}, |
|
{ |
|
"epoch": 48.79, |
|
"grad_norm": 5.514860153198242, |
|
"learning_rate": 2.827635327635328e-05, |
|
"loss": 0.1551, |
|
"step": 3830 |
|
}, |
|
{ |
|
"epoch": 48.92, |
|
"grad_norm": 4.811940670013428, |
|
"learning_rate": 2.8205128205128207e-05, |
|
"loss": 0.1475, |
|
"step": 3840 |
|
}, |
|
{ |
|
"epoch": 48.99, |
|
"eval_accuracy": 0.9710762331838565, |
|
"eval_loss": 0.08098708093166351, |
|
"eval_runtime": 10.2161, |
|
"eval_samples_per_second": 436.568, |
|
"eval_steps_per_second": 3.426, |
|
"step": 3846 |
|
}, |
|
{ |
|
"epoch": 49.04, |
|
"grad_norm": 2.1994664669036865, |
|
"learning_rate": 2.8133903133903132e-05, |
|
"loss": 0.1395, |
|
"step": 3850 |
|
}, |
|
{ |
|
"epoch": 49.17, |
|
"grad_norm": 3.298173666000366, |
|
"learning_rate": 2.8062678062678068e-05, |
|
"loss": 0.1446, |
|
"step": 3860 |
|
}, |
|
{ |
|
"epoch": 49.3, |
|
"grad_norm": 3.0983335971832275, |
|
"learning_rate": 2.7991452991452993e-05, |
|
"loss": 0.1445, |
|
"step": 3870 |
|
}, |
|
{ |
|
"epoch": 49.43, |
|
"grad_norm": 2.741196393966675, |
|
"learning_rate": 2.7920227920227922e-05, |
|
"loss": 0.1398, |
|
"step": 3880 |
|
}, |
|
{ |
|
"epoch": 49.55, |
|
"grad_norm": 4.882288932800293, |
|
"learning_rate": 2.7849002849002847e-05, |
|
"loss": 0.1393, |
|
"step": 3890 |
|
}, |
|
{ |
|
"epoch": 49.68, |
|
"grad_norm": 3.602112054824829, |
|
"learning_rate": 2.777777777777778e-05, |
|
"loss": 0.1358, |
|
"step": 3900 |
|
}, |
|
{ |
|
"epoch": 49.81, |
|
"grad_norm": 3.6309425830841064, |
|
"learning_rate": 2.7706552706552708e-05, |
|
"loss": 0.1431, |
|
"step": 3910 |
|
}, |
|
{ |
|
"epoch": 49.94, |
|
"grad_norm": 4.7522101402282715, |
|
"learning_rate": 2.7635327635327633e-05, |
|
"loss": 0.1452, |
|
"step": 3920 |
|
}, |
|
{ |
|
"epoch": 50.0, |
|
"eval_accuracy": 0.9697309417040358, |
|
"eval_loss": 0.0942649319767952, |
|
"eval_runtime": 10.3942, |
|
"eval_samples_per_second": 429.086, |
|
"eval_steps_per_second": 3.367, |
|
"step": 3925 |
|
}, |
|
{ |
|
"epoch": 50.06, |
|
"grad_norm": 2.828382730484009, |
|
"learning_rate": 2.756410256410257e-05, |
|
"loss": 0.1416, |
|
"step": 3930 |
|
}, |
|
{ |
|
"epoch": 50.19, |
|
"grad_norm": 2.9188289642333984, |
|
"learning_rate": 2.7492877492877494e-05, |
|
"loss": 0.1428, |
|
"step": 3940 |
|
}, |
|
{ |
|
"epoch": 50.32, |
|
"grad_norm": 3.1731650829315186, |
|
"learning_rate": 2.7421652421652423e-05, |
|
"loss": 0.1275, |
|
"step": 3950 |
|
}, |
|
{ |
|
"epoch": 50.45, |
|
"grad_norm": 3.445089340209961, |
|
"learning_rate": 2.7350427350427355e-05, |
|
"loss": 0.138, |
|
"step": 3960 |
|
}, |
|
{ |
|
"epoch": 50.57, |
|
"grad_norm": 3.163792133331299, |
|
"learning_rate": 2.7279202279202283e-05, |
|
"loss": 0.1387, |
|
"step": 3970 |
|
}, |
|
{ |
|
"epoch": 50.7, |
|
"grad_norm": 2.461280107498169, |
|
"learning_rate": 2.720797720797721e-05, |
|
"loss": 0.1439, |
|
"step": 3980 |
|
}, |
|
{ |
|
"epoch": 50.83, |
|
"grad_norm": 3.221646785736084, |
|
"learning_rate": 2.7136752136752137e-05, |
|
"loss": 0.125, |
|
"step": 3990 |
|
}, |
|
{ |
|
"epoch": 50.96, |
|
"grad_norm": 4.313431262969971, |
|
"learning_rate": 2.706552706552707e-05, |
|
"loss": 0.148, |
|
"step": 4000 |
|
}, |
|
{ |
|
"epoch": 50.99, |
|
"eval_accuracy": 0.9704035874439462, |
|
"eval_loss": 0.0943058580160141, |
|
"eval_runtime": 10.7055, |
|
"eval_samples_per_second": 416.608, |
|
"eval_steps_per_second": 3.269, |
|
"step": 4003 |
|
}, |
|
{ |
|
"epoch": 51.08, |
|
"grad_norm": 3.418119192123413, |
|
"learning_rate": 2.6994301994301995e-05, |
|
"loss": 0.1299, |
|
"step": 4010 |
|
}, |
|
{ |
|
"epoch": 51.21, |
|
"grad_norm": 3.145573854446411, |
|
"learning_rate": 2.6923076923076923e-05, |
|
"loss": 0.1406, |
|
"step": 4020 |
|
}, |
|
{ |
|
"epoch": 51.34, |
|
"grad_norm": 3.141972541809082, |
|
"learning_rate": 2.6851851851851855e-05, |
|
"loss": 0.1463, |
|
"step": 4030 |
|
}, |
|
{ |
|
"epoch": 51.46, |
|
"grad_norm": 2.996499538421631, |
|
"learning_rate": 2.6780626780626784e-05, |
|
"loss": 0.1453, |
|
"step": 4040 |
|
}, |
|
{ |
|
"epoch": 51.59, |
|
"grad_norm": 8.0499849319458, |
|
"learning_rate": 2.670940170940171e-05, |
|
"loss": 0.1423, |
|
"step": 4050 |
|
}, |
|
{ |
|
"epoch": 51.72, |
|
"grad_norm": 2.710381507873535, |
|
"learning_rate": 2.6638176638176638e-05, |
|
"loss": 0.145, |
|
"step": 4060 |
|
}, |
|
{ |
|
"epoch": 51.85, |
|
"grad_norm": 2.674241304397583, |
|
"learning_rate": 2.656695156695157e-05, |
|
"loss": 0.1447, |
|
"step": 4070 |
|
}, |
|
{ |
|
"epoch": 51.97, |
|
"grad_norm": 4.219669342041016, |
|
"learning_rate": 2.64957264957265e-05, |
|
"loss": 0.1392, |
|
"step": 4080 |
|
}, |
|
{ |
|
"epoch": 52.0, |
|
"eval_accuracy": 0.9695067264573991, |
|
"eval_loss": 0.08689579367637634, |
|
"eval_runtime": 10.4402, |
|
"eval_samples_per_second": 427.196, |
|
"eval_steps_per_second": 3.352, |
|
"step": 4082 |
|
}, |
|
{ |
|
"epoch": 52.1, |
|
"grad_norm": 3.582681179046631, |
|
"learning_rate": 2.6424501424501424e-05, |
|
"loss": 0.1486, |
|
"step": 4090 |
|
}, |
|
{ |
|
"epoch": 52.23, |
|
"grad_norm": 5.496790409088135, |
|
"learning_rate": 2.6353276353276356e-05, |
|
"loss": 0.1415, |
|
"step": 4100 |
|
}, |
|
{ |
|
"epoch": 52.36, |
|
"grad_norm": 3.7762444019317627, |
|
"learning_rate": 2.6282051282051285e-05, |
|
"loss": 0.1414, |
|
"step": 4110 |
|
}, |
|
{ |
|
"epoch": 52.48, |
|
"grad_norm": 3.185096502304077, |
|
"learning_rate": 2.621082621082621e-05, |
|
"loss": 0.1394, |
|
"step": 4120 |
|
}, |
|
{ |
|
"epoch": 52.61, |
|
"grad_norm": 3.969970941543579, |
|
"learning_rate": 2.613960113960114e-05, |
|
"loss": 0.1471, |
|
"step": 4130 |
|
}, |
|
{ |
|
"epoch": 52.74, |
|
"grad_norm": 3.0576138496398926, |
|
"learning_rate": 2.606837606837607e-05, |
|
"loss": 0.1425, |
|
"step": 4140 |
|
}, |
|
{ |
|
"epoch": 52.87, |
|
"grad_norm": 3.9330620765686035, |
|
"learning_rate": 2.5997150997151e-05, |
|
"loss": 0.1474, |
|
"step": 4150 |
|
}, |
|
{ |
|
"epoch": 52.99, |
|
"grad_norm": 4.062915325164795, |
|
"learning_rate": 2.5925925925925925e-05, |
|
"loss": 0.1389, |
|
"step": 4160 |
|
}, |
|
{ |
|
"epoch": 52.99, |
|
"eval_accuracy": 0.9737668161434978, |
|
"eval_loss": 0.08755916357040405, |
|
"eval_runtime": 10.2788, |
|
"eval_samples_per_second": 433.903, |
|
"eval_steps_per_second": 3.405, |
|
"step": 4160 |
|
}, |
|
{ |
|
"epoch": 53.12, |
|
"grad_norm": 5.062565803527832, |
|
"learning_rate": 2.5854700854700857e-05, |
|
"loss": 0.1362, |
|
"step": 4170 |
|
}, |
|
{ |
|
"epoch": 53.25, |
|
"grad_norm": 3.4343032836914062, |
|
"learning_rate": 2.5783475783475786e-05, |
|
"loss": 0.1416, |
|
"step": 4180 |
|
}, |
|
{ |
|
"epoch": 53.38, |
|
"grad_norm": 2.700774669647217, |
|
"learning_rate": 2.5712250712250714e-05, |
|
"loss": 0.1326, |
|
"step": 4190 |
|
}, |
|
{ |
|
"epoch": 53.5, |
|
"grad_norm": 3.6101787090301514, |
|
"learning_rate": 2.564102564102564e-05, |
|
"loss": 0.1425, |
|
"step": 4200 |
|
}, |
|
{ |
|
"epoch": 53.63, |
|
"grad_norm": 3.596529960632324, |
|
"learning_rate": 2.5569800569800572e-05, |
|
"loss": 0.1282, |
|
"step": 4210 |
|
}, |
|
{ |
|
"epoch": 53.76, |
|
"grad_norm": 3.493985891342163, |
|
"learning_rate": 2.54985754985755e-05, |
|
"loss": 0.1426, |
|
"step": 4220 |
|
}, |
|
{ |
|
"epoch": 53.89, |
|
"grad_norm": 4.897692680358887, |
|
"learning_rate": 2.5427350427350426e-05, |
|
"loss": 0.1295, |
|
"step": 4230 |
|
}, |
|
{ |
|
"epoch": 54.0, |
|
"eval_accuracy": 0.9748878923766816, |
|
"eval_loss": 0.07158580422401428, |
|
"eval_runtime": 10.2959, |
|
"eval_samples_per_second": 433.183, |
|
"eval_steps_per_second": 3.399, |
|
"step": 4239 |
|
}, |
|
{ |
|
"epoch": 54.01, |
|
"grad_norm": 2.9666993618011475, |
|
"learning_rate": 2.535612535612536e-05, |
|
"loss": 0.1345, |
|
"step": 4240 |
|
}, |
|
{ |
|
"epoch": 54.14, |
|
"grad_norm": 3.897339105606079, |
|
"learning_rate": 2.5284900284900286e-05, |
|
"loss": 0.1323, |
|
"step": 4250 |
|
}, |
|
{ |
|
"epoch": 54.27, |
|
"grad_norm": 2.5065248012542725, |
|
"learning_rate": 2.5213675213675215e-05, |
|
"loss": 0.1279, |
|
"step": 4260 |
|
}, |
|
{ |
|
"epoch": 54.39, |
|
"grad_norm": 5.935027122497559, |
|
"learning_rate": 2.514245014245014e-05, |
|
"loss": 0.1365, |
|
"step": 4270 |
|
}, |
|
{ |
|
"epoch": 54.52, |
|
"grad_norm": 3.2909152507781982, |
|
"learning_rate": 2.5071225071225073e-05, |
|
"loss": 0.1465, |
|
"step": 4280 |
|
}, |
|
{ |
|
"epoch": 54.65, |
|
"grad_norm": 3.701512336730957, |
|
"learning_rate": 2.5e-05, |
|
"loss": 0.1278, |
|
"step": 4290 |
|
}, |
|
{ |
|
"epoch": 54.78, |
|
"grad_norm": 3.227336883544922, |
|
"learning_rate": 2.492877492877493e-05, |
|
"loss": 0.1398, |
|
"step": 4300 |
|
}, |
|
{ |
|
"epoch": 54.9, |
|
"grad_norm": 4.817599773406982, |
|
"learning_rate": 2.485754985754986e-05, |
|
"loss": 0.1394, |
|
"step": 4310 |
|
}, |
|
{ |
|
"epoch": 54.99, |
|
"eval_accuracy": 0.9751121076233183, |
|
"eval_loss": 0.07604929804801941, |
|
"eval_runtime": 10.3795, |
|
"eval_samples_per_second": 429.695, |
|
"eval_steps_per_second": 3.372, |
|
"step": 4317 |
|
}, |
|
{ |
|
"epoch": 55.03, |
|
"grad_norm": 3.4743528366088867, |
|
"learning_rate": 2.4786324786324787e-05, |
|
"loss": 0.139, |
|
"step": 4320 |
|
}, |
|
{ |
|
"epoch": 55.16, |
|
"grad_norm": 3.180518865585327, |
|
"learning_rate": 2.4715099715099716e-05, |
|
"loss": 0.1309, |
|
"step": 4330 |
|
}, |
|
{ |
|
"epoch": 55.29, |
|
"grad_norm": 4.160658359527588, |
|
"learning_rate": 2.4643874643874645e-05, |
|
"loss": 0.1303, |
|
"step": 4340 |
|
}, |
|
{ |
|
"epoch": 55.41, |
|
"grad_norm": 4.241569519042969, |
|
"learning_rate": 2.4572649572649573e-05, |
|
"loss": 0.1269, |
|
"step": 4350 |
|
}, |
|
{ |
|
"epoch": 55.54, |
|
"grad_norm": 3.5420875549316406, |
|
"learning_rate": 2.4501424501424502e-05, |
|
"loss": 0.1363, |
|
"step": 4360 |
|
}, |
|
{ |
|
"epoch": 55.67, |
|
"grad_norm": 3.2201666831970215, |
|
"learning_rate": 2.4430199430199434e-05, |
|
"loss": 0.1339, |
|
"step": 4370 |
|
}, |
|
{ |
|
"epoch": 55.8, |
|
"grad_norm": 3.6691396236419678, |
|
"learning_rate": 2.435897435897436e-05, |
|
"loss": 0.1212, |
|
"step": 4380 |
|
}, |
|
{ |
|
"epoch": 55.92, |
|
"grad_norm": 3.5013792514801025, |
|
"learning_rate": 2.4287749287749288e-05, |
|
"loss": 0.1354, |
|
"step": 4390 |
|
}, |
|
{ |
|
"epoch": 56.0, |
|
"eval_accuracy": 0.9726457399103139, |
|
"eval_loss": 0.07711587846279144, |
|
"eval_runtime": 10.631, |
|
"eval_samples_per_second": 419.529, |
|
"eval_steps_per_second": 3.292, |
|
"step": 4396 |
|
}, |
|
{ |
|
"epoch": 56.05, |
|
"grad_norm": 3.276965618133545, |
|
"learning_rate": 2.4216524216524217e-05, |
|
"loss": 0.1331, |
|
"step": 4400 |
|
}, |
|
{ |
|
"epoch": 56.18, |
|
"grad_norm": 4.311830997467041, |
|
"learning_rate": 2.4145299145299145e-05, |
|
"loss": 0.1376, |
|
"step": 4410 |
|
}, |
|
{ |
|
"epoch": 56.31, |
|
"grad_norm": 3.6508076190948486, |
|
"learning_rate": 2.4074074074074074e-05, |
|
"loss": 0.1415, |
|
"step": 4420 |
|
}, |
|
{ |
|
"epoch": 56.43, |
|
"grad_norm": 4.303562641143799, |
|
"learning_rate": 2.4002849002849003e-05, |
|
"loss": 0.1298, |
|
"step": 4430 |
|
}, |
|
{ |
|
"epoch": 56.56, |
|
"grad_norm": 3.281374216079712, |
|
"learning_rate": 2.3931623931623935e-05, |
|
"loss": 0.1273, |
|
"step": 4440 |
|
}, |
|
{ |
|
"epoch": 56.69, |
|
"grad_norm": 4.081586837768555, |
|
"learning_rate": 2.386039886039886e-05, |
|
"loss": 0.1287, |
|
"step": 4450 |
|
}, |
|
{ |
|
"epoch": 56.82, |
|
"grad_norm": 5.350743293762207, |
|
"learning_rate": 2.3789173789173792e-05, |
|
"loss": 0.1336, |
|
"step": 4460 |
|
}, |
|
{ |
|
"epoch": 56.94, |
|
"grad_norm": 3.5683207511901855, |
|
"learning_rate": 2.3717948717948718e-05, |
|
"loss": 0.1303, |
|
"step": 4470 |
|
}, |
|
{ |
|
"epoch": 56.99, |
|
"eval_accuracy": 0.9737668161434978, |
|
"eval_loss": 0.07378443330526352, |
|
"eval_runtime": 10.3059, |
|
"eval_samples_per_second": 432.762, |
|
"eval_steps_per_second": 3.396, |
|
"step": 4474 |
|
}, |
|
{ |
|
"epoch": 57.07, |
|
"grad_norm": 4.208488941192627, |
|
"learning_rate": 2.364672364672365e-05, |
|
"loss": 0.1318, |
|
"step": 4480 |
|
}, |
|
{ |
|
"epoch": 57.2, |
|
"grad_norm": 3.7957112789154053, |
|
"learning_rate": 2.3575498575498578e-05, |
|
"loss": 0.1296, |
|
"step": 4490 |
|
}, |
|
{ |
|
"epoch": 57.32, |
|
"grad_norm": 3.4968860149383545, |
|
"learning_rate": 2.3504273504273504e-05, |
|
"loss": 0.1208, |
|
"step": 4500 |
|
}, |
|
{ |
|
"epoch": 57.45, |
|
"grad_norm": 3.205312728881836, |
|
"learning_rate": 2.3433048433048436e-05, |
|
"loss": 0.129, |
|
"step": 4510 |
|
}, |
|
{ |
|
"epoch": 57.58, |
|
"grad_norm": 3.5443649291992188, |
|
"learning_rate": 2.336182336182336e-05, |
|
"loss": 0.1249, |
|
"step": 4520 |
|
}, |
|
{ |
|
"epoch": 57.71, |
|
"grad_norm": 4.887803554534912, |
|
"learning_rate": 2.3290598290598293e-05, |
|
"loss": 0.1384, |
|
"step": 4530 |
|
}, |
|
{ |
|
"epoch": 57.83, |
|
"grad_norm": 3.6573920249938965, |
|
"learning_rate": 2.321937321937322e-05, |
|
"loss": 0.1215, |
|
"step": 4540 |
|
}, |
|
{ |
|
"epoch": 57.96, |
|
"grad_norm": 2.767714500427246, |
|
"learning_rate": 2.314814814814815e-05, |
|
"loss": 0.1274, |
|
"step": 4550 |
|
}, |
|
{ |
|
"epoch": 58.0, |
|
"eval_accuracy": 0.9724215246636772, |
|
"eval_loss": 0.08340190351009369, |
|
"eval_runtime": 10.2608, |
|
"eval_samples_per_second": 434.665, |
|
"eval_steps_per_second": 3.411, |
|
"step": 4553 |
|
}, |
|
{ |
|
"epoch": 58.09, |
|
"grad_norm": 2.663431406021118, |
|
"learning_rate": 2.307692307692308e-05, |
|
"loss": 0.1336, |
|
"step": 4560 |
|
}, |
|
{ |
|
"epoch": 58.22, |
|
"grad_norm": 3.646914482116699, |
|
"learning_rate": 2.3005698005698008e-05, |
|
"loss": 0.1225, |
|
"step": 4570 |
|
}, |
|
{ |
|
"epoch": 58.34, |
|
"grad_norm": 4.5192461013793945, |
|
"learning_rate": 2.2934472934472936e-05, |
|
"loss": 0.1305, |
|
"step": 4580 |
|
}, |
|
{ |
|
"epoch": 58.47, |
|
"grad_norm": 3.2621636390686035, |
|
"learning_rate": 2.2863247863247865e-05, |
|
"loss": 0.1223, |
|
"step": 4590 |
|
}, |
|
{ |
|
"epoch": 58.6, |
|
"grad_norm": 4.150683879852295, |
|
"learning_rate": 2.2792022792022794e-05, |
|
"loss": 0.1307, |
|
"step": 4600 |
|
}, |
|
{ |
|
"epoch": 58.73, |
|
"grad_norm": 2.834435224533081, |
|
"learning_rate": 2.272079772079772e-05, |
|
"loss": 0.1312, |
|
"step": 4610 |
|
}, |
|
{ |
|
"epoch": 58.85, |
|
"grad_norm": 4.989063739776611, |
|
"learning_rate": 2.264957264957265e-05, |
|
"loss": 0.1275, |
|
"step": 4620 |
|
}, |
|
{ |
|
"epoch": 58.98, |
|
"grad_norm": 2.5661489963531494, |
|
"learning_rate": 2.257834757834758e-05, |
|
"loss": 0.1276, |
|
"step": 4630 |
|
}, |
|
{ |
|
"epoch": 58.99, |
|
"eval_accuracy": 0.9746636771300449, |
|
"eval_loss": 0.08008856326341629, |
|
"eval_runtime": 11.2045, |
|
"eval_samples_per_second": 398.054, |
|
"eval_steps_per_second": 3.124, |
|
"step": 4631 |
|
}, |
|
{ |
|
"epoch": 59.11, |
|
"grad_norm": 5.07080602645874, |
|
"learning_rate": 2.250712250712251e-05, |
|
"loss": 0.1212, |
|
"step": 4640 |
|
}, |
|
{ |
|
"epoch": 59.24, |
|
"grad_norm": 3.80442476272583, |
|
"learning_rate": 2.2435897435897437e-05, |
|
"loss": 0.1254, |
|
"step": 4650 |
|
}, |
|
{ |
|
"epoch": 59.36, |
|
"grad_norm": 2.6264405250549316, |
|
"learning_rate": 2.2364672364672366e-05, |
|
"loss": 0.1206, |
|
"step": 4660 |
|
}, |
|
{ |
|
"epoch": 59.49, |
|
"grad_norm": 2.7287278175354004, |
|
"learning_rate": 2.2293447293447295e-05, |
|
"loss": 0.1206, |
|
"step": 4670 |
|
}, |
|
{ |
|
"epoch": 59.62, |
|
"grad_norm": 4.177188873291016, |
|
"learning_rate": 2.2222222222222223e-05, |
|
"loss": 0.1283, |
|
"step": 4680 |
|
}, |
|
{ |
|
"epoch": 59.75, |
|
"grad_norm": 3.6389882564544678, |
|
"learning_rate": 2.2150997150997152e-05, |
|
"loss": 0.1373, |
|
"step": 4690 |
|
}, |
|
{ |
|
"epoch": 59.87, |
|
"grad_norm": 3.7176778316497803, |
|
"learning_rate": 2.207977207977208e-05, |
|
"loss": 0.1225, |
|
"step": 4700 |
|
}, |
|
{ |
|
"epoch": 60.0, |
|
"grad_norm": 3.90850567817688, |
|
"learning_rate": 2.200854700854701e-05, |
|
"loss": 0.1372, |
|
"step": 4710 |
|
}, |
|
{ |
|
"epoch": 60.0, |
|
"eval_accuracy": 0.9762331838565023, |
|
"eval_loss": 0.06674081087112427, |
|
"eval_runtime": 10.292, |
|
"eval_samples_per_second": 433.347, |
|
"eval_steps_per_second": 3.401, |
|
"step": 4710 |
|
}, |
|
{ |
|
"epoch": 60.13, |
|
"grad_norm": 2.7248566150665283, |
|
"learning_rate": 2.1937321937321938e-05, |
|
"loss": 0.1263, |
|
"step": 4720 |
|
}, |
|
{ |
|
"epoch": 60.25, |
|
"grad_norm": 4.5060319900512695, |
|
"learning_rate": 2.1866096866096867e-05, |
|
"loss": 0.1239, |
|
"step": 4730 |
|
}, |
|
{ |
|
"epoch": 60.38, |
|
"grad_norm": 3.740729331970215, |
|
"learning_rate": 2.1794871794871795e-05, |
|
"loss": 0.1179, |
|
"step": 4740 |
|
}, |
|
{ |
|
"epoch": 60.51, |
|
"grad_norm": 3.870370388031006, |
|
"learning_rate": 2.1723646723646724e-05, |
|
"loss": 0.1281, |
|
"step": 4750 |
|
}, |
|
{ |
|
"epoch": 60.64, |
|
"grad_norm": 3.314084768295288, |
|
"learning_rate": 2.1652421652421653e-05, |
|
"loss": 0.1097, |
|
"step": 4760 |
|
}, |
|
{ |
|
"epoch": 60.76, |
|
"grad_norm": 3.503286838531494, |
|
"learning_rate": 2.1581196581196585e-05, |
|
"loss": 0.1349, |
|
"step": 4770 |
|
}, |
|
{ |
|
"epoch": 60.89, |
|
"grad_norm": 4.666501045227051, |
|
"learning_rate": 2.150997150997151e-05, |
|
"loss": 0.1417, |
|
"step": 4780 |
|
}, |
|
{ |
|
"epoch": 60.99, |
|
"eval_accuracy": 0.9737668161434978, |
|
"eval_loss": 0.07472814619541168, |
|
"eval_runtime": 10.7094, |
|
"eval_samples_per_second": 416.456, |
|
"eval_steps_per_second": 3.268, |
|
"step": 4788 |
|
}, |
|
{ |
|
"epoch": 61.02, |
|
"grad_norm": 3.3184502124786377, |
|
"learning_rate": 2.143874643874644e-05, |
|
"loss": 0.1315, |
|
"step": 4790 |
|
}, |
|
{ |
|
"epoch": 61.15, |
|
"grad_norm": 2.5868165493011475, |
|
"learning_rate": 2.1367521367521368e-05, |
|
"loss": 0.125, |
|
"step": 4800 |
|
}, |
|
{ |
|
"epoch": 61.27, |
|
"grad_norm": 2.161036729812622, |
|
"learning_rate": 2.1296296296296296e-05, |
|
"loss": 0.1302, |
|
"step": 4810 |
|
}, |
|
{ |
|
"epoch": 61.4, |
|
"grad_norm": 4.309581279754639, |
|
"learning_rate": 2.1225071225071228e-05, |
|
"loss": 0.1263, |
|
"step": 4820 |
|
}, |
|
{ |
|
"epoch": 61.53, |
|
"grad_norm": 4.2532148361206055, |
|
"learning_rate": 2.1153846153846154e-05, |
|
"loss": 0.1235, |
|
"step": 4830 |
|
}, |
|
{ |
|
"epoch": 61.66, |
|
"grad_norm": 3.6365723609924316, |
|
"learning_rate": 2.1082621082621086e-05, |
|
"loss": 0.1237, |
|
"step": 4840 |
|
}, |
|
{ |
|
"epoch": 61.78, |
|
"grad_norm": 2.715269088745117, |
|
"learning_rate": 2.101139601139601e-05, |
|
"loss": 0.1224, |
|
"step": 4850 |
|
}, |
|
{ |
|
"epoch": 61.91, |
|
"grad_norm": 1.8564375638961792, |
|
"learning_rate": 2.0940170940170943e-05, |
|
"loss": 0.1249, |
|
"step": 4860 |
|
}, |
|
{ |
|
"epoch": 62.0, |
|
"eval_accuracy": 0.974439461883408, |
|
"eval_loss": 0.07346644997596741, |
|
"eval_runtime": 11.0856, |
|
"eval_samples_per_second": 402.324, |
|
"eval_steps_per_second": 3.157, |
|
"step": 4867 |
|
}, |
|
{ |
|
"epoch": 62.04, |
|
"grad_norm": 6.367058753967285, |
|
"learning_rate": 2.0868945868945868e-05, |
|
"loss": 0.1208, |
|
"step": 4870 |
|
}, |
|
{ |
|
"epoch": 62.17, |
|
"grad_norm": 2.9588770866394043, |
|
"learning_rate": 2.07977207977208e-05, |
|
"loss": 0.1199, |
|
"step": 4880 |
|
}, |
|
{ |
|
"epoch": 62.29, |
|
"grad_norm": 2.998056650161743, |
|
"learning_rate": 2.072649572649573e-05, |
|
"loss": 0.1325, |
|
"step": 4890 |
|
}, |
|
{ |
|
"epoch": 62.42, |
|
"grad_norm": 3.0851778984069824, |
|
"learning_rate": 2.0655270655270654e-05, |
|
"loss": 0.1289, |
|
"step": 4900 |
|
}, |
|
{ |
|
"epoch": 62.55, |
|
"grad_norm": 3.708634376525879, |
|
"learning_rate": 2.0584045584045586e-05, |
|
"loss": 0.1262, |
|
"step": 4910 |
|
}, |
|
{ |
|
"epoch": 62.68, |
|
"grad_norm": 3.1951818466186523, |
|
"learning_rate": 2.0512820512820512e-05, |
|
"loss": 0.1289, |
|
"step": 4920 |
|
}, |
|
{ |
|
"epoch": 62.8, |
|
"grad_norm": 3.3787269592285156, |
|
"learning_rate": 2.0441595441595444e-05, |
|
"loss": 0.12, |
|
"step": 4930 |
|
}, |
|
{ |
|
"epoch": 62.93, |
|
"grad_norm": 3.423536539077759, |
|
"learning_rate": 2.037037037037037e-05, |
|
"loss": 0.1212, |
|
"step": 4940 |
|
}, |
|
{ |
|
"epoch": 62.99, |
|
"eval_accuracy": 0.9780269058295964, |
|
"eval_loss": 0.06646457314491272, |
|
"eval_runtime": 11.0112, |
|
"eval_samples_per_second": 405.041, |
|
"eval_steps_per_second": 3.179, |
|
"step": 4945 |
|
}, |
|
{ |
|
"epoch": 63.06, |
|
"grad_norm": 2.744992733001709, |
|
"learning_rate": 2.02991452991453e-05, |
|
"loss": 0.1274, |
|
"step": 4950 |
|
}, |
|
{ |
|
"epoch": 63.18, |
|
"grad_norm": 2.38899564743042, |
|
"learning_rate": 2.022792022792023e-05, |
|
"loss": 0.1175, |
|
"step": 4960 |
|
}, |
|
{ |
|
"epoch": 63.31, |
|
"grad_norm": 3.2532944679260254, |
|
"learning_rate": 2.015669515669516e-05, |
|
"loss": 0.127, |
|
"step": 4970 |
|
}, |
|
{ |
|
"epoch": 63.44, |
|
"grad_norm": 3.285688877105713, |
|
"learning_rate": 2.0085470085470087e-05, |
|
"loss": 0.1229, |
|
"step": 4980 |
|
}, |
|
{ |
|
"epoch": 63.57, |
|
"grad_norm": 3.6270945072174072, |
|
"learning_rate": 2.0014245014245016e-05, |
|
"loss": 0.1299, |
|
"step": 4990 |
|
}, |
|
{ |
|
"epoch": 63.69, |
|
"grad_norm": 3.3796772956848145, |
|
"learning_rate": 1.9943019943019945e-05, |
|
"loss": 0.1328, |
|
"step": 5000 |
|
}, |
|
{ |
|
"epoch": 63.82, |
|
"grad_norm": 2.5864744186401367, |
|
"learning_rate": 1.987179487179487e-05, |
|
"loss": 0.1214, |
|
"step": 5010 |
|
}, |
|
{ |
|
"epoch": 63.95, |
|
"grad_norm": 3.8097081184387207, |
|
"learning_rate": 1.9800569800569802e-05, |
|
"loss": 0.1218, |
|
"step": 5020 |
|
}, |
|
{ |
|
"epoch": 64.0, |
|
"eval_accuracy": 0.9771300448430493, |
|
"eval_loss": 0.06802598387002945, |
|
"eval_runtime": 10.3686, |
|
"eval_samples_per_second": 430.144, |
|
"eval_steps_per_second": 3.376, |
|
"step": 5024 |
|
}, |
|
{ |
|
"epoch": 64.08, |
|
"grad_norm": 4.177213668823242, |
|
"learning_rate": 1.972934472934473e-05, |
|
"loss": 0.1228, |
|
"step": 5030 |
|
}, |
|
{ |
|
"epoch": 64.2, |
|
"grad_norm": 2.647430419921875, |
|
"learning_rate": 1.965811965811966e-05, |
|
"loss": 0.1301, |
|
"step": 5040 |
|
}, |
|
{ |
|
"epoch": 64.33, |
|
"grad_norm": 2.9064316749572754, |
|
"learning_rate": 1.9586894586894588e-05, |
|
"loss": 0.1324, |
|
"step": 5050 |
|
}, |
|
{ |
|
"epoch": 64.46, |
|
"grad_norm": 5.072793006896973, |
|
"learning_rate": 1.9515669515669517e-05, |
|
"loss": 0.1187, |
|
"step": 5060 |
|
}, |
|
{ |
|
"epoch": 64.59, |
|
"grad_norm": 3.9644272327423096, |
|
"learning_rate": 1.9444444444444445e-05, |
|
"loss": 0.1193, |
|
"step": 5070 |
|
}, |
|
{ |
|
"epoch": 64.71, |
|
"grad_norm": 3.9974265098571777, |
|
"learning_rate": 1.9373219373219374e-05, |
|
"loss": 0.1102, |
|
"step": 5080 |
|
}, |
|
{ |
|
"epoch": 64.84, |
|
"grad_norm": 4.116329669952393, |
|
"learning_rate": 1.9301994301994303e-05, |
|
"loss": 0.1259, |
|
"step": 5090 |
|
}, |
|
{ |
|
"epoch": 64.97, |
|
"grad_norm": 3.9444773197174072, |
|
"learning_rate": 1.923076923076923e-05, |
|
"loss": 0.1193, |
|
"step": 5100 |
|
}, |
|
{ |
|
"epoch": 64.99, |
|
"eval_accuracy": 0.9760089686098655, |
|
"eval_loss": 0.06792016327381134, |
|
"eval_runtime": 10.2369, |
|
"eval_samples_per_second": 435.679, |
|
"eval_steps_per_second": 3.419, |
|
"step": 5102 |
|
}, |
|
{ |
|
"epoch": 65.1, |
|
"grad_norm": 3.6853489875793457, |
|
"learning_rate": 1.915954415954416e-05, |
|
"loss": 0.1193, |
|
"step": 5110 |
|
}, |
|
{ |
|
"epoch": 65.22, |
|
"grad_norm": 3.1782078742980957, |
|
"learning_rate": 1.908831908831909e-05, |
|
"loss": 0.1237, |
|
"step": 5120 |
|
}, |
|
{ |
|
"epoch": 65.35, |
|
"grad_norm": 2.1836137771606445, |
|
"learning_rate": 1.9017094017094017e-05, |
|
"loss": 0.1157, |
|
"step": 5130 |
|
}, |
|
{ |
|
"epoch": 65.48, |
|
"grad_norm": 3.0715341567993164, |
|
"learning_rate": 1.8945868945868946e-05, |
|
"loss": 0.1252, |
|
"step": 5140 |
|
}, |
|
{ |
|
"epoch": 65.61, |
|
"grad_norm": 4.32092809677124, |
|
"learning_rate": 1.8874643874643875e-05, |
|
"loss": 0.1203, |
|
"step": 5150 |
|
}, |
|
{ |
|
"epoch": 65.73, |
|
"grad_norm": 2.6298437118530273, |
|
"learning_rate": 1.8803418803418804e-05, |
|
"loss": 0.1175, |
|
"step": 5160 |
|
}, |
|
{ |
|
"epoch": 65.86, |
|
"grad_norm": 3.1503069400787354, |
|
"learning_rate": 1.8732193732193736e-05, |
|
"loss": 0.1277, |
|
"step": 5170 |
|
}, |
|
{ |
|
"epoch": 65.99, |
|
"grad_norm": 2.627164363861084, |
|
"learning_rate": 1.866096866096866e-05, |
|
"loss": 0.1148, |
|
"step": 5180 |
|
}, |
|
{ |
|
"epoch": 66.0, |
|
"eval_accuracy": 0.9773542600896861, |
|
"eval_loss": 0.0685017853975296, |
|
"eval_runtime": 10.4375, |
|
"eval_samples_per_second": 427.307, |
|
"eval_steps_per_second": 3.353, |
|
"step": 5181 |
|
}, |
|
{ |
|
"epoch": 66.11, |
|
"grad_norm": 3.094813346862793, |
|
"learning_rate": 1.858974358974359e-05, |
|
"loss": 0.1221, |
|
"step": 5190 |
|
}, |
|
{ |
|
"epoch": 66.24, |
|
"grad_norm": 2.4847347736358643, |
|
"learning_rate": 1.8518518518518518e-05, |
|
"loss": 0.1395, |
|
"step": 5200 |
|
}, |
|
{ |
|
"epoch": 66.37, |
|
"grad_norm": 2.964954137802124, |
|
"learning_rate": 1.8447293447293447e-05, |
|
"loss": 0.1105, |
|
"step": 5210 |
|
}, |
|
{ |
|
"epoch": 66.5, |
|
"grad_norm": 4.3417439460754395, |
|
"learning_rate": 1.837606837606838e-05, |
|
"loss": 0.1168, |
|
"step": 5220 |
|
}, |
|
{ |
|
"epoch": 66.62, |
|
"grad_norm": 3.419844388961792, |
|
"learning_rate": 1.8304843304843304e-05, |
|
"loss": 0.1181, |
|
"step": 5230 |
|
}, |
|
{ |
|
"epoch": 66.75, |
|
"grad_norm": 3.0345187187194824, |
|
"learning_rate": 1.8233618233618236e-05, |
|
"loss": 0.1174, |
|
"step": 5240 |
|
}, |
|
{ |
|
"epoch": 66.88, |
|
"grad_norm": 2.317073345184326, |
|
"learning_rate": 1.8162393162393162e-05, |
|
"loss": 0.1242, |
|
"step": 5250 |
|
}, |
|
{ |
|
"epoch": 66.99, |
|
"eval_accuracy": 0.9800448430493274, |
|
"eval_loss": 0.06472171097993851, |
|
"eval_runtime": 11.3805, |
|
"eval_samples_per_second": 391.9, |
|
"eval_steps_per_second": 3.075, |
|
"step": 5259 |
|
}, |
|
{ |
|
"epoch": 67.01, |
|
"grad_norm": 2.867168664932251, |
|
"learning_rate": 1.8091168091168094e-05, |
|
"loss": 0.1135, |
|
"step": 5260 |
|
}, |
|
{ |
|
"epoch": 67.13, |
|
"grad_norm": 3.222736358642578, |
|
"learning_rate": 1.801994301994302e-05, |
|
"loss": 0.1148, |
|
"step": 5270 |
|
}, |
|
{ |
|
"epoch": 67.26, |
|
"grad_norm": 2.9340291023254395, |
|
"learning_rate": 1.794871794871795e-05, |
|
"loss": 0.1194, |
|
"step": 5280 |
|
}, |
|
{ |
|
"epoch": 67.39, |
|
"grad_norm": 3.0920584201812744, |
|
"learning_rate": 1.787749287749288e-05, |
|
"loss": 0.1256, |
|
"step": 5290 |
|
}, |
|
{ |
|
"epoch": 67.52, |
|
"grad_norm": 3.2834219932556152, |
|
"learning_rate": 1.7806267806267805e-05, |
|
"loss": 0.1177, |
|
"step": 5300 |
|
}, |
|
{ |
|
"epoch": 67.64, |
|
"grad_norm": 2.8663060665130615, |
|
"learning_rate": 1.7735042735042737e-05, |
|
"loss": 0.1135, |
|
"step": 5310 |
|
}, |
|
{ |
|
"epoch": 67.77, |
|
"grad_norm": 2.839386224746704, |
|
"learning_rate": 1.7663817663817662e-05, |
|
"loss": 0.1273, |
|
"step": 5320 |
|
}, |
|
{ |
|
"epoch": 67.9, |
|
"grad_norm": 3.285187005996704, |
|
"learning_rate": 1.7592592592592595e-05, |
|
"loss": 0.1167, |
|
"step": 5330 |
|
}, |
|
{ |
|
"epoch": 68.0, |
|
"eval_accuracy": 0.9782511210762331, |
|
"eval_loss": 0.06463192403316498, |
|
"eval_runtime": 11.1553, |
|
"eval_samples_per_second": 399.809, |
|
"eval_steps_per_second": 3.138, |
|
"step": 5338 |
|
}, |
|
{ |
|
"epoch": 68.03, |
|
"grad_norm": 3.3999927043914795, |
|
"learning_rate": 1.752136752136752e-05, |
|
"loss": 0.1185, |
|
"step": 5340 |
|
}, |
|
{ |
|
"epoch": 68.15, |
|
"grad_norm": 3.005258798599243, |
|
"learning_rate": 1.7450142450142452e-05, |
|
"loss": 0.1158, |
|
"step": 5350 |
|
}, |
|
{ |
|
"epoch": 68.28, |
|
"grad_norm": 2.6332740783691406, |
|
"learning_rate": 1.737891737891738e-05, |
|
"loss": 0.1251, |
|
"step": 5360 |
|
}, |
|
{ |
|
"epoch": 68.41, |
|
"grad_norm": 3.3177764415740967, |
|
"learning_rate": 1.730769230769231e-05, |
|
"loss": 0.1149, |
|
"step": 5370 |
|
}, |
|
{ |
|
"epoch": 68.54, |
|
"grad_norm": 2.7641139030456543, |
|
"learning_rate": 1.7236467236467238e-05, |
|
"loss": 0.1051, |
|
"step": 5380 |
|
}, |
|
{ |
|
"epoch": 68.66, |
|
"grad_norm": 2.804523468017578, |
|
"learning_rate": 1.7165242165242167e-05, |
|
"loss": 0.1142, |
|
"step": 5390 |
|
}, |
|
{ |
|
"epoch": 68.79, |
|
"grad_norm": 2.723928928375244, |
|
"learning_rate": 1.7094017094017095e-05, |
|
"loss": 0.1225, |
|
"step": 5400 |
|
}, |
|
{ |
|
"epoch": 68.92, |
|
"grad_norm": 3.367866039276123, |
|
"learning_rate": 1.702279202279202e-05, |
|
"loss": 0.117, |
|
"step": 5410 |
|
}, |
|
{ |
|
"epoch": 68.99, |
|
"eval_accuracy": 0.976457399103139, |
|
"eval_loss": 0.07633925974369049, |
|
"eval_runtime": 10.4599, |
|
"eval_samples_per_second": 426.391, |
|
"eval_steps_per_second": 3.346, |
|
"step": 5416 |
|
}, |
|
{ |
|
"epoch": 69.04, |
|
"grad_norm": 3.8155672550201416, |
|
"learning_rate": 1.6951566951566953e-05, |
|
"loss": 0.1192, |
|
"step": 5420 |
|
}, |
|
{ |
|
"epoch": 69.17, |
|
"grad_norm": 3.2433383464813232, |
|
"learning_rate": 1.688034188034188e-05, |
|
"loss": 0.124, |
|
"step": 5430 |
|
}, |
|
{ |
|
"epoch": 69.3, |
|
"grad_norm": 3.6944828033447266, |
|
"learning_rate": 1.680911680911681e-05, |
|
"loss": 0.1164, |
|
"step": 5440 |
|
}, |
|
{ |
|
"epoch": 69.43, |
|
"grad_norm": 3.125169515609741, |
|
"learning_rate": 1.673789173789174e-05, |
|
"loss": 0.1271, |
|
"step": 5450 |
|
}, |
|
{ |
|
"epoch": 69.55, |
|
"grad_norm": 3.082037925720215, |
|
"learning_rate": 1.6666666666666667e-05, |
|
"loss": 0.1121, |
|
"step": 5460 |
|
}, |
|
{ |
|
"epoch": 69.68, |
|
"grad_norm": 2.831787586212158, |
|
"learning_rate": 1.6595441595441596e-05, |
|
"loss": 0.113, |
|
"step": 5470 |
|
}, |
|
{ |
|
"epoch": 69.81, |
|
"grad_norm": 4.720918655395508, |
|
"learning_rate": 1.6524216524216525e-05, |
|
"loss": 0.1211, |
|
"step": 5480 |
|
}, |
|
{ |
|
"epoch": 69.94, |
|
"grad_norm": 3.05661940574646, |
|
"learning_rate": 1.6452991452991454e-05, |
|
"loss": 0.1153, |
|
"step": 5490 |
|
}, |
|
{ |
|
"epoch": 70.0, |
|
"eval_accuracy": 0.9753363228699552, |
|
"eval_loss": 0.07196246832609177, |
|
"eval_runtime": 10.3439, |
|
"eval_samples_per_second": 431.173, |
|
"eval_steps_per_second": 3.384, |
|
"step": 5495 |
|
}, |
|
{ |
|
"epoch": 70.06, |
|
"grad_norm": 2.578516960144043, |
|
"learning_rate": 1.6381766381766382e-05, |
|
"loss": 0.1139, |
|
"step": 5500 |
|
}, |
|
{ |
|
"epoch": 70.19, |
|
"grad_norm": 3.541548013687134, |
|
"learning_rate": 1.631054131054131e-05, |
|
"loss": 0.1125, |
|
"step": 5510 |
|
}, |
|
{ |
|
"epoch": 70.32, |
|
"grad_norm": 2.709437847137451, |
|
"learning_rate": 1.623931623931624e-05, |
|
"loss": 0.1264, |
|
"step": 5520 |
|
}, |
|
{ |
|
"epoch": 70.45, |
|
"grad_norm": 1.947713851928711, |
|
"learning_rate": 1.6168091168091168e-05, |
|
"loss": 0.1165, |
|
"step": 5530 |
|
}, |
|
{ |
|
"epoch": 70.57, |
|
"grad_norm": 2.832228899002075, |
|
"learning_rate": 1.6096866096866097e-05, |
|
"loss": 0.1242, |
|
"step": 5540 |
|
}, |
|
{ |
|
"epoch": 70.7, |
|
"grad_norm": 1.871283769607544, |
|
"learning_rate": 1.602564102564103e-05, |
|
"loss": 0.1175, |
|
"step": 5550 |
|
}, |
|
{ |
|
"epoch": 70.83, |
|
"grad_norm": 4.204667568206787, |
|
"learning_rate": 1.5954415954415954e-05, |
|
"loss": 0.1097, |
|
"step": 5560 |
|
}, |
|
{ |
|
"epoch": 70.96, |
|
"grad_norm": 4.079117298126221, |
|
"learning_rate": 1.5883190883190886e-05, |
|
"loss": 0.12, |
|
"step": 5570 |
|
}, |
|
{ |
|
"epoch": 70.99, |
|
"eval_accuracy": 0.9771300448430493, |
|
"eval_loss": 0.07174661755561829, |
|
"eval_runtime": 10.2885, |
|
"eval_samples_per_second": 433.494, |
|
"eval_steps_per_second": 3.402, |
|
"step": 5573 |
|
}, |
|
{ |
|
"epoch": 71.08, |
|
"grad_norm": 2.7620456218719482, |
|
"learning_rate": 1.581196581196581e-05, |
|
"loss": 0.1112, |
|
"step": 5580 |
|
}, |
|
{ |
|
"epoch": 71.21, |
|
"grad_norm": 2.8281893730163574, |
|
"learning_rate": 1.574074074074074e-05, |
|
"loss": 0.1207, |
|
"step": 5590 |
|
}, |
|
{ |
|
"epoch": 71.34, |
|
"grad_norm": 3.6722238063812256, |
|
"learning_rate": 1.566951566951567e-05, |
|
"loss": 0.1306, |
|
"step": 5600 |
|
}, |
|
{ |
|
"epoch": 71.46, |
|
"grad_norm": 3.395185708999634, |
|
"learning_rate": 1.5598290598290598e-05, |
|
"loss": 0.1093, |
|
"step": 5610 |
|
}, |
|
{ |
|
"epoch": 71.59, |
|
"grad_norm": 1.971070647239685, |
|
"learning_rate": 1.552706552706553e-05, |
|
"loss": 0.1136, |
|
"step": 5620 |
|
}, |
|
{ |
|
"epoch": 71.72, |
|
"grad_norm": 2.9130399227142334, |
|
"learning_rate": 1.5455840455840455e-05, |
|
"loss": 0.1003, |
|
"step": 5630 |
|
}, |
|
{ |
|
"epoch": 71.85, |
|
"grad_norm": 2.5622756481170654, |
|
"learning_rate": 1.5384615384615387e-05, |
|
"loss": 0.1079, |
|
"step": 5640 |
|
}, |
|
{ |
|
"epoch": 71.97, |
|
"grad_norm": 3.1971945762634277, |
|
"learning_rate": 1.5313390313390312e-05, |
|
"loss": 0.1054, |
|
"step": 5650 |
|
}, |
|
{ |
|
"epoch": 72.0, |
|
"eval_accuracy": 0.9766816143497757, |
|
"eval_loss": 0.067739337682724, |
|
"eval_runtime": 10.4674, |
|
"eval_samples_per_second": 426.084, |
|
"eval_steps_per_second": 3.344, |
|
"step": 5652 |
|
}, |
|
{ |
|
"epoch": 72.1, |
|
"grad_norm": 2.922377109527588, |
|
"learning_rate": 1.5242165242165243e-05, |
|
"loss": 0.1139, |
|
"step": 5660 |
|
}, |
|
{ |
|
"epoch": 72.23, |
|
"grad_norm": 2.4021689891815186, |
|
"learning_rate": 1.517094017094017e-05, |
|
"loss": 0.108, |
|
"step": 5670 |
|
}, |
|
{ |
|
"epoch": 72.36, |
|
"grad_norm": 2.4300858974456787, |
|
"learning_rate": 1.50997150997151e-05, |
|
"loss": 0.0996, |
|
"step": 5680 |
|
}, |
|
{ |
|
"epoch": 72.48, |
|
"grad_norm": 2.820833206176758, |
|
"learning_rate": 1.502849002849003e-05, |
|
"loss": 0.1135, |
|
"step": 5690 |
|
}, |
|
{ |
|
"epoch": 72.61, |
|
"grad_norm": 3.1016552448272705, |
|
"learning_rate": 1.4957264957264958e-05, |
|
"loss": 0.1242, |
|
"step": 5700 |
|
}, |
|
{ |
|
"epoch": 72.74, |
|
"grad_norm": 3.2477970123291016, |
|
"learning_rate": 1.4886039886039888e-05, |
|
"loss": 0.1238, |
|
"step": 5710 |
|
}, |
|
{ |
|
"epoch": 72.87, |
|
"grad_norm": 3.47007155418396, |
|
"learning_rate": 1.4814814814814815e-05, |
|
"loss": 0.1118, |
|
"step": 5720 |
|
}, |
|
{ |
|
"epoch": 72.99, |
|
"grad_norm": 4.1029534339904785, |
|
"learning_rate": 1.4743589743589745e-05, |
|
"loss": 0.1183, |
|
"step": 5730 |
|
}, |
|
{ |
|
"epoch": 72.99, |
|
"eval_accuracy": 0.9755605381165919, |
|
"eval_loss": 0.07409787178039551, |
|
"eval_runtime": 10.2462, |
|
"eval_samples_per_second": 435.283, |
|
"eval_steps_per_second": 3.416, |
|
"step": 5730 |
|
}, |
|
{ |
|
"epoch": 73.12, |
|
"grad_norm": 2.9067156314849854, |
|
"learning_rate": 1.4672364672364672e-05, |
|
"loss": 0.1128, |
|
"step": 5740 |
|
}, |
|
{ |
|
"epoch": 73.25, |
|
"grad_norm": 2.93924880027771, |
|
"learning_rate": 1.4601139601139601e-05, |
|
"loss": 0.1098, |
|
"step": 5750 |
|
}, |
|
{ |
|
"epoch": 73.38, |
|
"grad_norm": 2.739440679550171, |
|
"learning_rate": 1.4529914529914531e-05, |
|
"loss": 0.1261, |
|
"step": 5760 |
|
}, |
|
{ |
|
"epoch": 73.5, |
|
"grad_norm": 2.8033335208892822, |
|
"learning_rate": 1.4458689458689458e-05, |
|
"loss": 0.1106, |
|
"step": 5770 |
|
}, |
|
{ |
|
"epoch": 73.63, |
|
"grad_norm": 2.91092586517334, |
|
"learning_rate": 1.4387464387464389e-05, |
|
"loss": 0.1042, |
|
"step": 5780 |
|
}, |
|
{ |
|
"epoch": 73.76, |
|
"grad_norm": 3.8227293491363525, |
|
"learning_rate": 1.4316239316239316e-05, |
|
"loss": 0.1074, |
|
"step": 5790 |
|
}, |
|
{ |
|
"epoch": 73.89, |
|
"grad_norm": 2.5820155143737793, |
|
"learning_rate": 1.4245014245014246e-05, |
|
"loss": 0.1082, |
|
"step": 5800 |
|
}, |
|
{ |
|
"epoch": 74.0, |
|
"eval_accuracy": 0.9786995515695067, |
|
"eval_loss": 0.06757389008998871, |
|
"eval_runtime": 11.2378, |
|
"eval_samples_per_second": 396.874, |
|
"eval_steps_per_second": 3.114, |
|
"step": 5809 |
|
}, |
|
{ |
|
"epoch": 74.01, |
|
"grad_norm": 3.325789451599121, |
|
"learning_rate": 1.4173789173789173e-05, |
|
"loss": 0.1094, |
|
"step": 5810 |
|
}, |
|
{ |
|
"epoch": 74.14, |
|
"grad_norm": 4.414618968963623, |
|
"learning_rate": 1.4102564102564104e-05, |
|
"loss": 0.1132, |
|
"step": 5820 |
|
}, |
|
{ |
|
"epoch": 74.27, |
|
"grad_norm": 3.79183292388916, |
|
"learning_rate": 1.4031339031339034e-05, |
|
"loss": 0.1049, |
|
"step": 5830 |
|
}, |
|
{ |
|
"epoch": 74.39, |
|
"grad_norm": 3.2433505058288574, |
|
"learning_rate": 1.3960113960113961e-05, |
|
"loss": 0.106, |
|
"step": 5840 |
|
}, |
|
{ |
|
"epoch": 74.52, |
|
"grad_norm": 2.762228488922119, |
|
"learning_rate": 1.388888888888889e-05, |
|
"loss": 0.1164, |
|
"step": 5850 |
|
}, |
|
{ |
|
"epoch": 74.65, |
|
"grad_norm": 3.4645419120788574, |
|
"learning_rate": 1.3817663817663817e-05, |
|
"loss": 0.1207, |
|
"step": 5860 |
|
}, |
|
{ |
|
"epoch": 74.78, |
|
"grad_norm": 2.7387356758117676, |
|
"learning_rate": 1.3746438746438747e-05, |
|
"loss": 0.117, |
|
"step": 5870 |
|
}, |
|
{ |
|
"epoch": 74.9, |
|
"grad_norm": 3.4120523929595947, |
|
"learning_rate": 1.3675213675213677e-05, |
|
"loss": 0.1088, |
|
"step": 5880 |
|
}, |
|
{ |
|
"epoch": 74.99, |
|
"eval_accuracy": 0.9751121076233183, |
|
"eval_loss": 0.06995895504951477, |
|
"eval_runtime": 11.4177, |
|
"eval_samples_per_second": 390.621, |
|
"eval_steps_per_second": 3.065, |
|
"step": 5887 |
|
}, |
|
{ |
|
"epoch": 75.03, |
|
"grad_norm": 2.275909662246704, |
|
"learning_rate": 1.3603988603988604e-05, |
|
"loss": 0.1245, |
|
"step": 5890 |
|
}, |
|
{ |
|
"epoch": 75.16, |
|
"grad_norm": 2.343656539916992, |
|
"learning_rate": 1.3532763532763535e-05, |
|
"loss": 0.1044, |
|
"step": 5900 |
|
}, |
|
{ |
|
"epoch": 75.29, |
|
"grad_norm": 4.431203842163086, |
|
"learning_rate": 1.3461538461538462e-05, |
|
"loss": 0.1126, |
|
"step": 5910 |
|
}, |
|
{ |
|
"epoch": 75.41, |
|
"grad_norm": 2.1456689834594727, |
|
"learning_rate": 1.3390313390313392e-05, |
|
"loss": 0.1094, |
|
"step": 5920 |
|
}, |
|
{ |
|
"epoch": 75.54, |
|
"grad_norm": 2.2616610527038574, |
|
"learning_rate": 1.3319088319088319e-05, |
|
"loss": 0.1117, |
|
"step": 5930 |
|
}, |
|
{ |
|
"epoch": 75.67, |
|
"grad_norm": 3.3451130390167236, |
|
"learning_rate": 1.324786324786325e-05, |
|
"loss": 0.1229, |
|
"step": 5940 |
|
}, |
|
{ |
|
"epoch": 75.8, |
|
"grad_norm": 3.578305721282959, |
|
"learning_rate": 1.3176638176638178e-05, |
|
"loss": 0.1133, |
|
"step": 5950 |
|
}, |
|
{ |
|
"epoch": 75.92, |
|
"grad_norm": 2.3085949420928955, |
|
"learning_rate": 1.3105413105413105e-05, |
|
"loss": 0.1125, |
|
"step": 5960 |
|
}, |
|
{ |
|
"epoch": 76.0, |
|
"eval_accuracy": 0.97847533632287, |
|
"eval_loss": 0.0663037970662117, |
|
"eval_runtime": 10.5423, |
|
"eval_samples_per_second": 423.057, |
|
"eval_steps_per_second": 3.32, |
|
"step": 5966 |
|
}, |
|
{ |
|
"epoch": 76.05, |
|
"grad_norm": 3.370089530944824, |
|
"learning_rate": 1.3034188034188035e-05, |
|
"loss": 0.1103, |
|
"step": 5970 |
|
}, |
|
{ |
|
"epoch": 76.18, |
|
"grad_norm": 3.2268216609954834, |
|
"learning_rate": 1.2962962962962962e-05, |
|
"loss": 0.1133, |
|
"step": 5980 |
|
}, |
|
{ |
|
"epoch": 76.31, |
|
"grad_norm": 2.5671768188476562, |
|
"learning_rate": 1.2891737891737893e-05, |
|
"loss": 0.1083, |
|
"step": 5990 |
|
}, |
|
{ |
|
"epoch": 76.43, |
|
"grad_norm": 3.591700553894043, |
|
"learning_rate": 1.282051282051282e-05, |
|
"loss": 0.1078, |
|
"step": 6000 |
|
}, |
|
{ |
|
"epoch": 76.56, |
|
"grad_norm": 3.825608968734741, |
|
"learning_rate": 1.274928774928775e-05, |
|
"loss": 0.1116, |
|
"step": 6010 |
|
}, |
|
{ |
|
"epoch": 76.69, |
|
"grad_norm": 3.0779523849487305, |
|
"learning_rate": 1.267806267806268e-05, |
|
"loss": 0.1174, |
|
"step": 6020 |
|
}, |
|
{ |
|
"epoch": 76.82, |
|
"grad_norm": 2.5936954021453857, |
|
"learning_rate": 1.2606837606837608e-05, |
|
"loss": 0.1062, |
|
"step": 6030 |
|
}, |
|
{ |
|
"epoch": 76.94, |
|
"grad_norm": 2.4793176651000977, |
|
"learning_rate": 1.2535612535612536e-05, |
|
"loss": 0.1099, |
|
"step": 6040 |
|
}, |
|
{ |
|
"epoch": 76.99, |
|
"eval_accuracy": 0.9789237668161435, |
|
"eval_loss": 0.0621563158929348, |
|
"eval_runtime": 10.8775, |
|
"eval_samples_per_second": 410.02, |
|
"eval_steps_per_second": 3.218, |
|
"step": 6044 |
|
}, |
|
{ |
|
"epoch": 77.07, |
|
"grad_norm": 2.338437557220459, |
|
"learning_rate": 1.2464387464387465e-05, |
|
"loss": 0.1113, |
|
"step": 6050 |
|
}, |
|
{ |
|
"epoch": 77.2, |
|
"grad_norm": 3.1221530437469482, |
|
"learning_rate": 1.2393162393162394e-05, |
|
"loss": 0.1088, |
|
"step": 6060 |
|
}, |
|
{ |
|
"epoch": 77.32, |
|
"grad_norm": 4.532721519470215, |
|
"learning_rate": 1.2321937321937322e-05, |
|
"loss": 0.1117, |
|
"step": 6070 |
|
}, |
|
{ |
|
"epoch": 77.45, |
|
"grad_norm": 4.488461494445801, |
|
"learning_rate": 1.2250712250712251e-05, |
|
"loss": 0.1079, |
|
"step": 6080 |
|
}, |
|
{ |
|
"epoch": 77.58, |
|
"grad_norm": 2.4823977947235107, |
|
"learning_rate": 1.217948717948718e-05, |
|
"loss": 0.1161, |
|
"step": 6090 |
|
}, |
|
{ |
|
"epoch": 77.71, |
|
"grad_norm": 3.6858134269714355, |
|
"learning_rate": 1.2108262108262108e-05, |
|
"loss": 0.1035, |
|
"step": 6100 |
|
}, |
|
{ |
|
"epoch": 77.83, |
|
"grad_norm": 2.647006034851074, |
|
"learning_rate": 1.2037037037037037e-05, |
|
"loss": 0.1158, |
|
"step": 6110 |
|
}, |
|
{ |
|
"epoch": 77.96, |
|
"grad_norm": 2.6735477447509766, |
|
"learning_rate": 1.1965811965811967e-05, |
|
"loss": 0.1128, |
|
"step": 6120 |
|
}, |
|
{ |
|
"epoch": 78.0, |
|
"eval_accuracy": 0.979372197309417, |
|
"eval_loss": 0.06598477810621262, |
|
"eval_runtime": 10.8917, |
|
"eval_samples_per_second": 409.486, |
|
"eval_steps_per_second": 3.213, |
|
"step": 6123 |
|
}, |
|
{ |
|
"epoch": 78.09, |
|
"grad_norm": 2.4398152828216553, |
|
"learning_rate": 1.1894586894586896e-05, |
|
"loss": 0.1175, |
|
"step": 6130 |
|
}, |
|
{ |
|
"epoch": 78.22, |
|
"grad_norm": 2.345200538635254, |
|
"learning_rate": 1.1823361823361825e-05, |
|
"loss": 0.1119, |
|
"step": 6140 |
|
}, |
|
{ |
|
"epoch": 78.34, |
|
"grad_norm": 2.4092376232147217, |
|
"learning_rate": 1.1752136752136752e-05, |
|
"loss": 0.1166, |
|
"step": 6150 |
|
}, |
|
{ |
|
"epoch": 78.47, |
|
"grad_norm": 2.720093011856079, |
|
"learning_rate": 1.168091168091168e-05, |
|
"loss": 0.1072, |
|
"step": 6160 |
|
}, |
|
{ |
|
"epoch": 78.6, |
|
"grad_norm": 3.7120041847229004, |
|
"learning_rate": 1.160968660968661e-05, |
|
"loss": 0.1047, |
|
"step": 6170 |
|
}, |
|
{ |
|
"epoch": 78.73, |
|
"grad_norm": 2.8657994270324707, |
|
"learning_rate": 1.153846153846154e-05, |
|
"loss": 0.11, |
|
"step": 6180 |
|
}, |
|
{ |
|
"epoch": 78.85, |
|
"grad_norm": 4.595070838928223, |
|
"learning_rate": 1.1467236467236468e-05, |
|
"loss": 0.106, |
|
"step": 6190 |
|
}, |
|
{ |
|
"epoch": 78.98, |
|
"grad_norm": 3.3609421253204346, |
|
"learning_rate": 1.1396011396011397e-05, |
|
"loss": 0.1178, |
|
"step": 6200 |
|
}, |
|
{ |
|
"epoch": 78.99, |
|
"eval_accuracy": 0.9780269058295964, |
|
"eval_loss": 0.06985889375209808, |
|
"eval_runtime": 10.464, |
|
"eval_samples_per_second": 426.224, |
|
"eval_steps_per_second": 3.345, |
|
"step": 6201 |
|
}, |
|
{ |
|
"epoch": 79.11, |
|
"grad_norm": 3.0199007987976074, |
|
"learning_rate": 1.1324786324786326e-05, |
|
"loss": 0.1126, |
|
"step": 6210 |
|
}, |
|
{ |
|
"epoch": 79.24, |
|
"grad_norm": 2.6402392387390137, |
|
"learning_rate": 1.1253561253561254e-05, |
|
"loss": 0.1042, |
|
"step": 6220 |
|
}, |
|
{ |
|
"epoch": 79.36, |
|
"grad_norm": 3.8217875957489014, |
|
"learning_rate": 1.1182336182336183e-05, |
|
"loss": 0.1007, |
|
"step": 6230 |
|
}, |
|
{ |
|
"epoch": 79.49, |
|
"grad_norm": 3.7943570613861084, |
|
"learning_rate": 1.1111111111111112e-05, |
|
"loss": 0.1115, |
|
"step": 6240 |
|
}, |
|
{ |
|
"epoch": 79.62, |
|
"grad_norm": 3.9829814434051514, |
|
"learning_rate": 1.103988603988604e-05, |
|
"loss": 0.0956, |
|
"step": 6250 |
|
}, |
|
{ |
|
"epoch": 79.75, |
|
"grad_norm": 3.254031181335449, |
|
"learning_rate": 1.0968660968660969e-05, |
|
"loss": 0.1088, |
|
"step": 6260 |
|
}, |
|
{ |
|
"epoch": 79.87, |
|
"grad_norm": 3.34173583984375, |
|
"learning_rate": 1.0897435897435898e-05, |
|
"loss": 0.1117, |
|
"step": 6270 |
|
}, |
|
{ |
|
"epoch": 80.0, |
|
"grad_norm": 3.977674961090088, |
|
"learning_rate": 1.0826210826210826e-05, |
|
"loss": 0.1129, |
|
"step": 6280 |
|
}, |
|
{ |
|
"epoch": 80.0, |
|
"eval_accuracy": 0.979372197309417, |
|
"eval_loss": 0.0604555681347847, |
|
"eval_runtime": 10.4032, |
|
"eval_samples_per_second": 428.716, |
|
"eval_steps_per_second": 3.364, |
|
"step": 6280 |
|
}, |
|
{ |
|
"epoch": 80.13, |
|
"grad_norm": 6.222870349884033, |
|
"learning_rate": 1.0754985754985755e-05, |
|
"loss": 0.0968, |
|
"step": 6290 |
|
}, |
|
{ |
|
"epoch": 80.25, |
|
"grad_norm": 3.2638728618621826, |
|
"learning_rate": 1.0683760683760684e-05, |
|
"loss": 0.1026, |
|
"step": 6300 |
|
}, |
|
{ |
|
"epoch": 80.38, |
|
"grad_norm": 4.064828872680664, |
|
"learning_rate": 1.0612535612535614e-05, |
|
"loss": 0.1073, |
|
"step": 6310 |
|
}, |
|
{ |
|
"epoch": 80.51, |
|
"grad_norm": 3.2518603801727295, |
|
"learning_rate": 1.0541310541310543e-05, |
|
"loss": 0.1039, |
|
"step": 6320 |
|
}, |
|
{ |
|
"epoch": 80.64, |
|
"grad_norm": 4.303738117218018, |
|
"learning_rate": 1.0470085470085471e-05, |
|
"loss": 0.1145, |
|
"step": 6330 |
|
}, |
|
{ |
|
"epoch": 80.76, |
|
"grad_norm": 3.027240514755249, |
|
"learning_rate": 1.03988603988604e-05, |
|
"loss": 0.1142, |
|
"step": 6340 |
|
}, |
|
{ |
|
"epoch": 80.89, |
|
"grad_norm": 6.462448596954346, |
|
"learning_rate": 1.0327635327635327e-05, |
|
"loss": 0.1013, |
|
"step": 6350 |
|
}, |
|
{ |
|
"epoch": 80.99, |
|
"eval_accuracy": 0.9778026905829597, |
|
"eval_loss": 0.06936540454626083, |
|
"eval_runtime": 10.4243, |
|
"eval_samples_per_second": 427.848, |
|
"eval_steps_per_second": 3.358, |
|
"step": 6358 |
|
}, |
|
{ |
|
"epoch": 81.02, |
|
"grad_norm": 3.1497411727905273, |
|
"learning_rate": 1.0256410256410256e-05, |
|
"loss": 0.0973, |
|
"step": 6360 |
|
}, |
|
{ |
|
"epoch": 81.15, |
|
"grad_norm": 2.9892778396606445, |
|
"learning_rate": 1.0185185185185185e-05, |
|
"loss": 0.1011, |
|
"step": 6370 |
|
}, |
|
{ |
|
"epoch": 81.27, |
|
"grad_norm": 3.1823902130126953, |
|
"learning_rate": 1.0113960113960115e-05, |
|
"loss": 0.1039, |
|
"step": 6380 |
|
}, |
|
{ |
|
"epoch": 81.4, |
|
"grad_norm": 2.418894052505493, |
|
"learning_rate": 1.0042735042735044e-05, |
|
"loss": 0.1163, |
|
"step": 6390 |
|
}, |
|
{ |
|
"epoch": 81.53, |
|
"grad_norm": 2.359311580657959, |
|
"learning_rate": 9.971509971509972e-06, |
|
"loss": 0.106, |
|
"step": 6400 |
|
}, |
|
{ |
|
"epoch": 81.66, |
|
"grad_norm": 4.2471604347229, |
|
"learning_rate": 9.900284900284901e-06, |
|
"loss": 0.1007, |
|
"step": 6410 |
|
}, |
|
{ |
|
"epoch": 81.78, |
|
"grad_norm": 4.758132457733154, |
|
"learning_rate": 9.82905982905983e-06, |
|
"loss": 0.1024, |
|
"step": 6420 |
|
}, |
|
{ |
|
"epoch": 81.91, |
|
"grad_norm": 2.4870598316192627, |
|
"learning_rate": 9.757834757834758e-06, |
|
"loss": 0.1078, |
|
"step": 6430 |
|
}, |
|
{ |
|
"epoch": 82.0, |
|
"eval_accuracy": 0.9782511210762331, |
|
"eval_loss": 0.06523539870977402, |
|
"eval_runtime": 10.4302, |
|
"eval_samples_per_second": 427.604, |
|
"eval_steps_per_second": 3.356, |
|
"step": 6437 |
|
}, |
|
{ |
|
"epoch": 82.04, |
|
"grad_norm": 2.7343454360961914, |
|
"learning_rate": 9.686609686609687e-06, |
|
"loss": 0.1012, |
|
"step": 6440 |
|
}, |
|
{ |
|
"epoch": 82.17, |
|
"grad_norm": 3.698735475540161, |
|
"learning_rate": 9.615384615384616e-06, |
|
"loss": 0.1075, |
|
"step": 6450 |
|
}, |
|
{ |
|
"epoch": 82.29, |
|
"grad_norm": 3.699899673461914, |
|
"learning_rate": 9.544159544159544e-06, |
|
"loss": 0.1085, |
|
"step": 6460 |
|
}, |
|
{ |
|
"epoch": 82.42, |
|
"grad_norm": 2.9340295791625977, |
|
"learning_rate": 9.472934472934473e-06, |
|
"loss": 0.1013, |
|
"step": 6470 |
|
}, |
|
{ |
|
"epoch": 82.55, |
|
"grad_norm": 2.8767290115356445, |
|
"learning_rate": 9.401709401709402e-06, |
|
"loss": 0.0997, |
|
"step": 6480 |
|
}, |
|
{ |
|
"epoch": 82.68, |
|
"grad_norm": 4.756886959075928, |
|
"learning_rate": 9.33048433048433e-06, |
|
"loss": 0.104, |
|
"step": 6490 |
|
}, |
|
{ |
|
"epoch": 82.8, |
|
"grad_norm": 5.544289588928223, |
|
"learning_rate": 9.259259259259259e-06, |
|
"loss": 0.1067, |
|
"step": 6500 |
|
}, |
|
{ |
|
"epoch": 82.93, |
|
"grad_norm": 2.162623882293701, |
|
"learning_rate": 9.18803418803419e-06, |
|
"loss": 0.0994, |
|
"step": 6510 |
|
}, |
|
{ |
|
"epoch": 82.99, |
|
"eval_accuracy": 0.9811659192825112, |
|
"eval_loss": 0.060371555387973785, |
|
"eval_runtime": 10.7881, |
|
"eval_samples_per_second": 413.42, |
|
"eval_steps_per_second": 3.244, |
|
"step": 6515 |
|
}, |
|
{ |
|
"epoch": 83.06, |
|
"grad_norm": 3.7604353427886963, |
|
"learning_rate": 9.116809116809118e-06, |
|
"loss": 0.1025, |
|
"step": 6520 |
|
}, |
|
{ |
|
"epoch": 83.18, |
|
"grad_norm": 2.88057279586792, |
|
"learning_rate": 9.045584045584047e-06, |
|
"loss": 0.1064, |
|
"step": 6530 |
|
}, |
|
{ |
|
"epoch": 83.31, |
|
"grad_norm": 2.340789556503296, |
|
"learning_rate": 8.974358974358976e-06, |
|
"loss": 0.1113, |
|
"step": 6540 |
|
}, |
|
{ |
|
"epoch": 83.44, |
|
"grad_norm": 1.9274300336837769, |
|
"learning_rate": 8.903133903133903e-06, |
|
"loss": 0.0986, |
|
"step": 6550 |
|
}, |
|
{ |
|
"epoch": 83.57, |
|
"grad_norm": 3.982344627380371, |
|
"learning_rate": 8.831908831908831e-06, |
|
"loss": 0.1172, |
|
"step": 6560 |
|
}, |
|
{ |
|
"epoch": 83.69, |
|
"grad_norm": 3.080048084259033, |
|
"learning_rate": 8.76068376068376e-06, |
|
"loss": 0.1034, |
|
"step": 6570 |
|
}, |
|
{ |
|
"epoch": 83.82, |
|
"grad_norm": 2.989431619644165, |
|
"learning_rate": 8.68945868945869e-06, |
|
"loss": 0.1117, |
|
"step": 6580 |
|
}, |
|
{ |
|
"epoch": 83.95, |
|
"grad_norm": 2.0674595832824707, |
|
"learning_rate": 8.618233618233619e-06, |
|
"loss": 0.1093, |
|
"step": 6590 |
|
}, |
|
{ |
|
"epoch": 84.0, |
|
"eval_accuracy": 0.9804932735426009, |
|
"eval_loss": 0.06000491976737976, |
|
"eval_runtime": 10.3525, |
|
"eval_samples_per_second": 430.812, |
|
"eval_steps_per_second": 3.381, |
|
"step": 6594 |
|
}, |
|
{ |
|
"epoch": 84.08, |
|
"grad_norm": 2.6291697025299072, |
|
"learning_rate": 8.547008547008548e-06, |
|
"loss": 0.1044, |
|
"step": 6600 |
|
}, |
|
{ |
|
"epoch": 84.2, |
|
"grad_norm": 2.8807177543640137, |
|
"learning_rate": 8.475783475783476e-06, |
|
"loss": 0.1062, |
|
"step": 6610 |
|
}, |
|
{ |
|
"epoch": 84.33, |
|
"grad_norm": 2.1789872646331787, |
|
"learning_rate": 8.404558404558405e-06, |
|
"loss": 0.106, |
|
"step": 6620 |
|
}, |
|
{ |
|
"epoch": 84.46, |
|
"grad_norm": 3.824889898300171, |
|
"learning_rate": 8.333333333333334e-06, |
|
"loss": 0.1086, |
|
"step": 6630 |
|
}, |
|
{ |
|
"epoch": 84.59, |
|
"grad_norm": 3.5731046199798584, |
|
"learning_rate": 8.262108262108262e-06, |
|
"loss": 0.1015, |
|
"step": 6640 |
|
}, |
|
{ |
|
"epoch": 84.71, |
|
"grad_norm": 2.0508885383605957, |
|
"learning_rate": 8.190883190883191e-06, |
|
"loss": 0.1016, |
|
"step": 6650 |
|
}, |
|
{ |
|
"epoch": 84.84, |
|
"grad_norm": 3.047819137573242, |
|
"learning_rate": 8.11965811965812e-06, |
|
"loss": 0.1017, |
|
"step": 6660 |
|
}, |
|
{ |
|
"epoch": 84.97, |
|
"grad_norm": 3.115485191345215, |
|
"learning_rate": 8.048433048433048e-06, |
|
"loss": 0.1039, |
|
"step": 6670 |
|
}, |
|
{ |
|
"epoch": 84.99, |
|
"eval_accuracy": 0.9786995515695067, |
|
"eval_loss": 0.06455076485872269, |
|
"eval_runtime": 10.2681, |
|
"eval_samples_per_second": 434.357, |
|
"eval_steps_per_second": 3.409, |
|
"step": 6672 |
|
}, |
|
{ |
|
"epoch": 85.1, |
|
"grad_norm": 2.3923819065093994, |
|
"learning_rate": 7.977207977207977e-06, |
|
"loss": 0.1023, |
|
"step": 6680 |
|
}, |
|
{ |
|
"epoch": 85.22, |
|
"grad_norm": 4.972830772399902, |
|
"learning_rate": 7.905982905982906e-06, |
|
"loss": 0.1017, |
|
"step": 6690 |
|
}, |
|
{ |
|
"epoch": 85.35, |
|
"grad_norm": 3.3479678630828857, |
|
"learning_rate": 7.834757834757835e-06, |
|
"loss": 0.1057, |
|
"step": 6700 |
|
}, |
|
{ |
|
"epoch": 85.48, |
|
"grad_norm": 4.136251449584961, |
|
"learning_rate": 7.763532763532765e-06, |
|
"loss": 0.1159, |
|
"step": 6710 |
|
}, |
|
{ |
|
"epoch": 85.61, |
|
"grad_norm": 3.715238094329834, |
|
"learning_rate": 7.692307692307694e-06, |
|
"loss": 0.1068, |
|
"step": 6720 |
|
}, |
|
{ |
|
"epoch": 85.73, |
|
"grad_norm": 2.57712459564209, |
|
"learning_rate": 7.6210826210826214e-06, |
|
"loss": 0.1127, |
|
"step": 6730 |
|
}, |
|
{ |
|
"epoch": 85.86, |
|
"grad_norm": 2.153548240661621, |
|
"learning_rate": 7.54985754985755e-06, |
|
"loss": 0.108, |
|
"step": 6740 |
|
}, |
|
{ |
|
"epoch": 85.99, |
|
"grad_norm": 2.7407655715942383, |
|
"learning_rate": 7.478632478632479e-06, |
|
"loss": 0.0963, |
|
"step": 6750 |
|
}, |
|
{ |
|
"epoch": 86.0, |
|
"eval_accuracy": 0.9789237668161435, |
|
"eval_loss": 0.06539863348007202, |
|
"eval_runtime": 10.5875, |
|
"eval_samples_per_second": 421.253, |
|
"eval_steps_per_second": 3.306, |
|
"step": 6751 |
|
}, |
|
{ |
|
"epoch": 86.11, |
|
"grad_norm": 2.8693339824676514, |
|
"learning_rate": 7.4074074074074075e-06, |
|
"loss": 0.0998, |
|
"step": 6760 |
|
}, |
|
{ |
|
"epoch": 86.24, |
|
"grad_norm": 5.288963317871094, |
|
"learning_rate": 7.336182336182336e-06, |
|
"loss": 0.1129, |
|
"step": 6770 |
|
}, |
|
{ |
|
"epoch": 86.37, |
|
"grad_norm": 3.063286066055298, |
|
"learning_rate": 7.264957264957266e-06, |
|
"loss": 0.1013, |
|
"step": 6780 |
|
}, |
|
{ |
|
"epoch": 86.5, |
|
"grad_norm": 3.49326229095459, |
|
"learning_rate": 7.193732193732194e-06, |
|
"loss": 0.1135, |
|
"step": 6790 |
|
}, |
|
{ |
|
"epoch": 86.62, |
|
"grad_norm": 3.090437412261963, |
|
"learning_rate": 7.122507122507123e-06, |
|
"loss": 0.1067, |
|
"step": 6800 |
|
}, |
|
{ |
|
"epoch": 86.75, |
|
"grad_norm": 2.2138142585754395, |
|
"learning_rate": 7.051282051282052e-06, |
|
"loss": 0.0982, |
|
"step": 6810 |
|
}, |
|
{ |
|
"epoch": 86.88, |
|
"grad_norm": 2.791416883468628, |
|
"learning_rate": 6.9800569800569804e-06, |
|
"loss": 0.1053, |
|
"step": 6820 |
|
}, |
|
{ |
|
"epoch": 86.99, |
|
"eval_accuracy": 0.9802690582959641, |
|
"eval_loss": 0.06266650557518005, |
|
"eval_runtime": 10.4188, |
|
"eval_samples_per_second": 428.073, |
|
"eval_steps_per_second": 3.359, |
|
"step": 6829 |
|
}, |
|
{ |
|
"epoch": 87.01, |
|
"grad_norm": 2.712346076965332, |
|
"learning_rate": 6.908831908831908e-06, |
|
"loss": 0.1096, |
|
"step": 6830 |
|
}, |
|
{ |
|
"epoch": 87.13, |
|
"grad_norm": 3.091013193130493, |
|
"learning_rate": 6.837606837606839e-06, |
|
"loss": 0.1135, |
|
"step": 6840 |
|
}, |
|
{ |
|
"epoch": 87.26, |
|
"grad_norm": 2.5728962421417236, |
|
"learning_rate": 6.766381766381767e-06, |
|
"loss": 0.0967, |
|
"step": 6850 |
|
}, |
|
{ |
|
"epoch": 87.39, |
|
"grad_norm": 2.6085259914398193, |
|
"learning_rate": 6.695156695156696e-06, |
|
"loss": 0.103, |
|
"step": 6860 |
|
}, |
|
{ |
|
"epoch": 87.52, |
|
"grad_norm": 2.8301172256469727, |
|
"learning_rate": 6.623931623931625e-06, |
|
"loss": 0.1051, |
|
"step": 6870 |
|
}, |
|
{ |
|
"epoch": 87.64, |
|
"grad_norm": 3.9067742824554443, |
|
"learning_rate": 6.5527065527065525e-06, |
|
"loss": 0.1025, |
|
"step": 6880 |
|
}, |
|
{ |
|
"epoch": 87.77, |
|
"grad_norm": 2.3788182735443115, |
|
"learning_rate": 6.481481481481481e-06, |
|
"loss": 0.1077, |
|
"step": 6890 |
|
}, |
|
{ |
|
"epoch": 87.9, |
|
"grad_norm": 2.764779806137085, |
|
"learning_rate": 6.41025641025641e-06, |
|
"loss": 0.0982, |
|
"step": 6900 |
|
}, |
|
{ |
|
"epoch": 88.0, |
|
"eval_accuracy": 0.9800448430493274, |
|
"eval_loss": 0.061907876282930374, |
|
"eval_runtime": 10.4602, |
|
"eval_samples_per_second": 426.379, |
|
"eval_steps_per_second": 3.346, |
|
"step": 6908 |
|
}, |
|
{ |
|
"epoch": 88.03, |
|
"grad_norm": 3.2199273109436035, |
|
"learning_rate": 6.33903133903134e-06, |
|
"loss": 0.1019, |
|
"step": 6910 |
|
}, |
|
{ |
|
"epoch": 88.15, |
|
"grad_norm": 2.0769879817962646, |
|
"learning_rate": 6.267806267806268e-06, |
|
"loss": 0.1063, |
|
"step": 6920 |
|
}, |
|
{ |
|
"epoch": 88.28, |
|
"grad_norm": 2.188361406326294, |
|
"learning_rate": 6.196581196581197e-06, |
|
"loss": 0.0914, |
|
"step": 6930 |
|
}, |
|
{ |
|
"epoch": 88.41, |
|
"grad_norm": 2.7577333450317383, |
|
"learning_rate": 6.1253561253561255e-06, |
|
"loss": 0.103, |
|
"step": 6940 |
|
}, |
|
{ |
|
"epoch": 88.54, |
|
"grad_norm": 4.330448627471924, |
|
"learning_rate": 6.054131054131054e-06, |
|
"loss": 0.1075, |
|
"step": 6950 |
|
}, |
|
{ |
|
"epoch": 88.66, |
|
"grad_norm": 2.961698055267334, |
|
"learning_rate": 5.982905982905984e-06, |
|
"loss": 0.1003, |
|
"step": 6960 |
|
}, |
|
{ |
|
"epoch": 88.79, |
|
"grad_norm": 3.129741668701172, |
|
"learning_rate": 5.911680911680912e-06, |
|
"loss": 0.101, |
|
"step": 6970 |
|
}, |
|
{ |
|
"epoch": 88.92, |
|
"grad_norm": 4.230170726776123, |
|
"learning_rate": 5.84045584045584e-06, |
|
"loss": 0.0944, |
|
"step": 6980 |
|
}, |
|
{ |
|
"epoch": 88.99, |
|
"eval_accuracy": 0.9795964125560538, |
|
"eval_loss": 0.060706257820129395, |
|
"eval_runtime": 11.2714, |
|
"eval_samples_per_second": 395.693, |
|
"eval_steps_per_second": 3.105, |
|
"step": 6986 |
|
}, |
|
{ |
|
"epoch": 89.04, |
|
"grad_norm": 3.656252145767212, |
|
"learning_rate": 5.76923076923077e-06, |
|
"loss": 0.1078, |
|
"step": 6990 |
|
}, |
|
{ |
|
"epoch": 89.17, |
|
"grad_norm": 3.191498041152954, |
|
"learning_rate": 5.6980056980056985e-06, |
|
"loss": 0.0929, |
|
"step": 7000 |
|
}, |
|
{ |
|
"epoch": 89.3, |
|
"grad_norm": 2.7151176929473877, |
|
"learning_rate": 5.626780626780627e-06, |
|
"loss": 0.1012, |
|
"step": 7010 |
|
}, |
|
{ |
|
"epoch": 89.43, |
|
"grad_norm": 2.8162662982940674, |
|
"learning_rate": 5.555555555555556e-06, |
|
"loss": 0.1071, |
|
"step": 7020 |
|
}, |
|
{ |
|
"epoch": 89.55, |
|
"grad_norm": 3.192384719848633, |
|
"learning_rate": 5.4843304843304845e-06, |
|
"loss": 0.095, |
|
"step": 7030 |
|
}, |
|
{ |
|
"epoch": 89.68, |
|
"grad_norm": 2.0794003009796143, |
|
"learning_rate": 5.413105413105413e-06, |
|
"loss": 0.1047, |
|
"step": 7040 |
|
}, |
|
{ |
|
"epoch": 89.81, |
|
"grad_norm": 5.687547206878662, |
|
"learning_rate": 5.341880341880342e-06, |
|
"loss": 0.1027, |
|
"step": 7050 |
|
}, |
|
{ |
|
"epoch": 89.94, |
|
"grad_norm": 2.7075958251953125, |
|
"learning_rate": 5.270655270655271e-06, |
|
"loss": 0.0959, |
|
"step": 7060 |
|
}, |
|
{ |
|
"epoch": 90.0, |
|
"eval_accuracy": 0.9800448430493274, |
|
"eval_loss": 0.06611481308937073, |
|
"eval_runtime": 10.5465, |
|
"eval_samples_per_second": 422.889, |
|
"eval_steps_per_second": 3.319, |
|
"step": 7065 |
|
}, |
|
{ |
|
"epoch": 90.06, |
|
"grad_norm": 2.5442237854003906, |
|
"learning_rate": 5.1994301994302e-06, |
|
"loss": 0.1023, |
|
"step": 7070 |
|
}, |
|
{ |
|
"epoch": 90.19, |
|
"grad_norm": 2.451735019683838, |
|
"learning_rate": 5.128205128205128e-06, |
|
"loss": 0.0936, |
|
"step": 7080 |
|
}, |
|
{ |
|
"epoch": 90.32, |
|
"grad_norm": 3.2897391319274902, |
|
"learning_rate": 5.0569800569800575e-06, |
|
"loss": 0.1067, |
|
"step": 7090 |
|
}, |
|
{ |
|
"epoch": 90.45, |
|
"grad_norm": 2.906238079071045, |
|
"learning_rate": 4.985754985754986e-06, |
|
"loss": 0.0985, |
|
"step": 7100 |
|
}, |
|
{ |
|
"epoch": 90.57, |
|
"grad_norm": 3.004779100418091, |
|
"learning_rate": 4.914529914529915e-06, |
|
"loss": 0.0874, |
|
"step": 7110 |
|
}, |
|
{ |
|
"epoch": 90.7, |
|
"grad_norm": 2.9214158058166504, |
|
"learning_rate": 4.8433048433048435e-06, |
|
"loss": 0.1001, |
|
"step": 7120 |
|
}, |
|
{ |
|
"epoch": 90.83, |
|
"grad_norm": 5.096872329711914, |
|
"learning_rate": 4.772079772079772e-06, |
|
"loss": 0.0938, |
|
"step": 7130 |
|
}, |
|
{ |
|
"epoch": 90.96, |
|
"grad_norm": 2.0840651988983154, |
|
"learning_rate": 4.700854700854701e-06, |
|
"loss": 0.101, |
|
"step": 7140 |
|
}, |
|
{ |
|
"epoch": 90.99, |
|
"eval_accuracy": 0.9809417040358744, |
|
"eval_loss": 0.0642046332359314, |
|
"eval_runtime": 10.3719, |
|
"eval_samples_per_second": 430.007, |
|
"eval_steps_per_second": 3.374, |
|
"step": 7143 |
|
}, |
|
{ |
|
"epoch": 91.08, |
|
"grad_norm": 3.488114356994629, |
|
"learning_rate": 4.6296296296296296e-06, |
|
"loss": 0.0997, |
|
"step": 7150 |
|
}, |
|
{ |
|
"epoch": 91.21, |
|
"grad_norm": 2.822373390197754, |
|
"learning_rate": 4.558404558404559e-06, |
|
"loss": 0.1048, |
|
"step": 7160 |
|
}, |
|
{ |
|
"epoch": 91.34, |
|
"grad_norm": 4.034416198730469, |
|
"learning_rate": 4.487179487179488e-06, |
|
"loss": 0.1008, |
|
"step": 7170 |
|
}, |
|
{ |
|
"epoch": 91.46, |
|
"grad_norm": 2.5757200717926025, |
|
"learning_rate": 4.415954415954416e-06, |
|
"loss": 0.1077, |
|
"step": 7180 |
|
}, |
|
{ |
|
"epoch": 91.59, |
|
"grad_norm": 4.5958662033081055, |
|
"learning_rate": 4.344729344729345e-06, |
|
"loss": 0.1003, |
|
"step": 7190 |
|
}, |
|
{ |
|
"epoch": 91.72, |
|
"grad_norm": 4.227506637573242, |
|
"learning_rate": 4.273504273504274e-06, |
|
"loss": 0.1032, |
|
"step": 7200 |
|
}, |
|
{ |
|
"epoch": 91.85, |
|
"grad_norm": 3.0407326221466064, |
|
"learning_rate": 4.2022792022792025e-06, |
|
"loss": 0.1012, |
|
"step": 7210 |
|
}, |
|
{ |
|
"epoch": 91.97, |
|
"grad_norm": 4.446778774261475, |
|
"learning_rate": 4.131054131054131e-06, |
|
"loss": 0.1095, |
|
"step": 7220 |
|
}, |
|
{ |
|
"epoch": 92.0, |
|
"eval_accuracy": 0.9807174887892377, |
|
"eval_loss": 0.060695547610521317, |
|
"eval_runtime": 10.4866, |
|
"eval_samples_per_second": 425.304, |
|
"eval_steps_per_second": 3.338, |
|
"step": 7222 |
|
}, |
|
{ |
|
"epoch": 92.1, |
|
"grad_norm": 2.901665449142456, |
|
"learning_rate": 4.05982905982906e-06, |
|
"loss": 0.0995, |
|
"step": 7230 |
|
}, |
|
{ |
|
"epoch": 92.23, |
|
"grad_norm": 2.991088390350342, |
|
"learning_rate": 3.988603988603989e-06, |
|
"loss": 0.098, |
|
"step": 7240 |
|
}, |
|
{ |
|
"epoch": 92.36, |
|
"grad_norm": 2.8773093223571777, |
|
"learning_rate": 3.917378917378917e-06, |
|
"loss": 0.1009, |
|
"step": 7250 |
|
}, |
|
{ |
|
"epoch": 92.48, |
|
"grad_norm": 2.93807053565979, |
|
"learning_rate": 3.846153846153847e-06, |
|
"loss": 0.0943, |
|
"step": 7260 |
|
}, |
|
{ |
|
"epoch": 92.61, |
|
"grad_norm": 2.7378273010253906, |
|
"learning_rate": 3.774928774928775e-06, |
|
"loss": 0.0974, |
|
"step": 7270 |
|
}, |
|
{ |
|
"epoch": 92.74, |
|
"grad_norm": 2.933560609817505, |
|
"learning_rate": 3.7037037037037037e-06, |
|
"loss": 0.1009, |
|
"step": 7280 |
|
}, |
|
{ |
|
"epoch": 92.87, |
|
"grad_norm": 3.682366132736206, |
|
"learning_rate": 3.632478632478633e-06, |
|
"loss": 0.0935, |
|
"step": 7290 |
|
}, |
|
{ |
|
"epoch": 92.99, |
|
"grad_norm": 3.3825466632843018, |
|
"learning_rate": 3.5612535612535615e-06, |
|
"loss": 0.1079, |
|
"step": 7300 |
|
}, |
|
{ |
|
"epoch": 92.99, |
|
"eval_accuracy": 0.9802690582959641, |
|
"eval_loss": 0.06095505878329277, |
|
"eval_runtime": 10.2605, |
|
"eval_samples_per_second": 434.675, |
|
"eval_steps_per_second": 3.411, |
|
"step": 7300 |
|
}, |
|
{ |
|
"epoch": 93.12, |
|
"grad_norm": 2.7189486026763916, |
|
"learning_rate": 3.4900284900284902e-06, |
|
"loss": 0.0916, |
|
"step": 7310 |
|
}, |
|
{ |
|
"epoch": 93.25, |
|
"grad_norm": 2.3126866817474365, |
|
"learning_rate": 3.4188034188034193e-06, |
|
"loss": 0.099, |
|
"step": 7320 |
|
}, |
|
{ |
|
"epoch": 93.38, |
|
"grad_norm": 2.1135940551757812, |
|
"learning_rate": 3.347578347578348e-06, |
|
"loss": 0.089, |
|
"step": 7330 |
|
}, |
|
{ |
|
"epoch": 93.5, |
|
"grad_norm": 4.08342981338501, |
|
"learning_rate": 3.2763532763532763e-06, |
|
"loss": 0.1045, |
|
"step": 7340 |
|
}, |
|
{ |
|
"epoch": 93.63, |
|
"grad_norm": 3.6746959686279297, |
|
"learning_rate": 3.205128205128205e-06, |
|
"loss": 0.1017, |
|
"step": 7350 |
|
}, |
|
{ |
|
"epoch": 93.76, |
|
"grad_norm": 2.6264023780822754, |
|
"learning_rate": 3.133903133903134e-06, |
|
"loss": 0.1037, |
|
"step": 7360 |
|
}, |
|
{ |
|
"epoch": 93.89, |
|
"grad_norm": 3.9072980880737305, |
|
"learning_rate": 3.0626780626780627e-06, |
|
"loss": 0.1153, |
|
"step": 7370 |
|
}, |
|
{ |
|
"epoch": 94.0, |
|
"eval_accuracy": 0.9798206278026906, |
|
"eval_loss": 0.06323741376399994, |
|
"eval_runtime": 10.4676, |
|
"eval_samples_per_second": 426.078, |
|
"eval_steps_per_second": 3.344, |
|
"step": 7379 |
|
}, |
|
{ |
|
"epoch": 94.01, |
|
"grad_norm": 2.2694411277770996, |
|
"learning_rate": 2.991452991452992e-06, |
|
"loss": 0.0948, |
|
"step": 7380 |
|
}, |
|
{ |
|
"epoch": 94.14, |
|
"grad_norm": 2.713989019393921, |
|
"learning_rate": 2.92022792022792e-06, |
|
"loss": 0.0993, |
|
"step": 7390 |
|
}, |
|
{ |
|
"epoch": 94.27, |
|
"grad_norm": 2.7938032150268555, |
|
"learning_rate": 2.8490028490028492e-06, |
|
"loss": 0.1007, |
|
"step": 7400 |
|
}, |
|
{ |
|
"epoch": 94.39, |
|
"grad_norm": 3.5172252655029297, |
|
"learning_rate": 2.777777777777778e-06, |
|
"loss": 0.0922, |
|
"step": 7410 |
|
}, |
|
{ |
|
"epoch": 94.52, |
|
"grad_norm": 2.6634156703948975, |
|
"learning_rate": 2.7065527065527066e-06, |
|
"loss": 0.0965, |
|
"step": 7420 |
|
}, |
|
{ |
|
"epoch": 94.65, |
|
"grad_norm": 3.6004881858825684, |
|
"learning_rate": 2.6353276353276357e-06, |
|
"loss": 0.1091, |
|
"step": 7430 |
|
}, |
|
{ |
|
"epoch": 94.78, |
|
"grad_norm": 2.2654991149902344, |
|
"learning_rate": 2.564102564102564e-06, |
|
"loss": 0.0934, |
|
"step": 7440 |
|
}, |
|
{ |
|
"epoch": 94.9, |
|
"grad_norm": 2.590730905532837, |
|
"learning_rate": 2.492877492877493e-06, |
|
"loss": 0.1022, |
|
"step": 7450 |
|
}, |
|
{ |
|
"epoch": 94.99, |
|
"eval_accuracy": 0.9811659192825112, |
|
"eval_loss": 0.061776064336299896, |
|
"eval_runtime": 10.6691, |
|
"eval_samples_per_second": 418.031, |
|
"eval_steps_per_second": 3.281, |
|
"step": 7457 |
|
}, |
|
{ |
|
"epoch": 95.03, |
|
"grad_norm": 2.9376516342163086, |
|
"learning_rate": 2.4216524216524218e-06, |
|
"loss": 0.0883, |
|
"step": 7460 |
|
}, |
|
{ |
|
"epoch": 95.16, |
|
"grad_norm": 3.090703248977661, |
|
"learning_rate": 2.3504273504273504e-06, |
|
"loss": 0.1053, |
|
"step": 7470 |
|
}, |
|
{ |
|
"epoch": 95.29, |
|
"grad_norm": 2.322587728500366, |
|
"learning_rate": 2.2792022792022796e-06, |
|
"loss": 0.0911, |
|
"step": 7480 |
|
}, |
|
{ |
|
"epoch": 95.41, |
|
"grad_norm": 3.8820295333862305, |
|
"learning_rate": 2.207977207977208e-06, |
|
"loss": 0.0998, |
|
"step": 7490 |
|
}, |
|
{ |
|
"epoch": 95.54, |
|
"grad_norm": 2.962207078933716, |
|
"learning_rate": 2.136752136752137e-06, |
|
"loss": 0.0934, |
|
"step": 7500 |
|
}, |
|
{ |
|
"epoch": 95.67, |
|
"grad_norm": 3.2972400188446045, |
|
"learning_rate": 2.0655270655270656e-06, |
|
"loss": 0.0996, |
|
"step": 7510 |
|
}, |
|
{ |
|
"epoch": 95.8, |
|
"grad_norm": 3.117957830429077, |
|
"learning_rate": 1.9943019943019943e-06, |
|
"loss": 0.1057, |
|
"step": 7520 |
|
}, |
|
{ |
|
"epoch": 95.92, |
|
"grad_norm": 3.654308557510376, |
|
"learning_rate": 1.9230769230769234e-06, |
|
"loss": 0.1079, |
|
"step": 7530 |
|
}, |
|
{ |
|
"epoch": 96.0, |
|
"eval_accuracy": 0.9809417040358744, |
|
"eval_loss": 0.06057490035891533, |
|
"eval_runtime": 10.8603, |
|
"eval_samples_per_second": 410.671, |
|
"eval_steps_per_second": 3.223, |
|
"step": 7536 |
|
}, |
|
{ |
|
"epoch": 96.05, |
|
"grad_norm": 3.377328395843506, |
|
"learning_rate": 1.8518518518518519e-06, |
|
"loss": 0.0884, |
|
"step": 7540 |
|
}, |
|
{ |
|
"epoch": 96.18, |
|
"grad_norm": 3.9910852909088135, |
|
"learning_rate": 1.7806267806267808e-06, |
|
"loss": 0.0989, |
|
"step": 7550 |
|
}, |
|
{ |
|
"epoch": 96.31, |
|
"grad_norm": 3.2044425010681152, |
|
"learning_rate": 1.7094017094017097e-06, |
|
"loss": 0.099, |
|
"step": 7560 |
|
}, |
|
{ |
|
"epoch": 96.43, |
|
"grad_norm": 2.561246156692505, |
|
"learning_rate": 1.6381766381766381e-06, |
|
"loss": 0.0914, |
|
"step": 7570 |
|
}, |
|
{ |
|
"epoch": 96.56, |
|
"grad_norm": 3.3193604946136475, |
|
"learning_rate": 1.566951566951567e-06, |
|
"loss": 0.0996, |
|
"step": 7580 |
|
}, |
|
{ |
|
"epoch": 96.69, |
|
"grad_norm": 2.501915216445923, |
|
"learning_rate": 1.495726495726496e-06, |
|
"loss": 0.1051, |
|
"step": 7590 |
|
}, |
|
{ |
|
"epoch": 96.82, |
|
"grad_norm": 3.687192916870117, |
|
"learning_rate": 1.4245014245014246e-06, |
|
"loss": 0.1013, |
|
"step": 7600 |
|
}, |
|
{ |
|
"epoch": 96.94, |
|
"grad_norm": 2.6535141468048096, |
|
"learning_rate": 1.3532763532763533e-06, |
|
"loss": 0.0942, |
|
"step": 7610 |
|
}, |
|
{ |
|
"epoch": 96.99, |
|
"eval_accuracy": 0.9800448430493274, |
|
"eval_loss": 0.0612373948097229, |
|
"eval_runtime": 10.5685, |
|
"eval_samples_per_second": 422.011, |
|
"eval_steps_per_second": 3.312, |
|
"step": 7614 |
|
}, |
|
{ |
|
"epoch": 97.07, |
|
"grad_norm": 4.711557865142822, |
|
"learning_rate": 1.282051282051282e-06, |
|
"loss": 0.103, |
|
"step": 7620 |
|
}, |
|
{ |
|
"epoch": 97.2, |
|
"grad_norm": 4.340113162994385, |
|
"learning_rate": 1.2108262108262109e-06, |
|
"loss": 0.0955, |
|
"step": 7630 |
|
}, |
|
{ |
|
"epoch": 97.32, |
|
"grad_norm": 3.0971832275390625, |
|
"learning_rate": 1.1396011396011398e-06, |
|
"loss": 0.098, |
|
"step": 7640 |
|
}, |
|
{ |
|
"epoch": 97.45, |
|
"grad_norm": 2.318181276321411, |
|
"learning_rate": 1.0683760683760685e-06, |
|
"loss": 0.0952, |
|
"step": 7650 |
|
}, |
|
{ |
|
"epoch": 97.58, |
|
"grad_norm": 2.7054574489593506, |
|
"learning_rate": 9.971509971509971e-07, |
|
"loss": 0.102, |
|
"step": 7660 |
|
}, |
|
{ |
|
"epoch": 97.71, |
|
"grad_norm": 2.653917074203491, |
|
"learning_rate": 9.259259259259259e-07, |
|
"loss": 0.0943, |
|
"step": 7670 |
|
}, |
|
{ |
|
"epoch": 97.83, |
|
"grad_norm": 1.9929085969924927, |
|
"learning_rate": 8.547008547008548e-07, |
|
"loss": 0.1015, |
|
"step": 7680 |
|
}, |
|
{ |
|
"epoch": 97.96, |
|
"grad_norm": 2.189223289489746, |
|
"learning_rate": 7.834757834757835e-07, |
|
"loss": 0.0927, |
|
"step": 7690 |
|
}, |
|
{ |
|
"epoch": 98.0, |
|
"eval_accuracy": 0.9809417040358744, |
|
"eval_loss": 0.05984415113925934, |
|
"eval_runtime": 10.4501, |
|
"eval_samples_per_second": 426.792, |
|
"eval_steps_per_second": 3.349, |
|
"step": 7693 |
|
}, |
|
{ |
|
"epoch": 98.09, |
|
"grad_norm": 2.4412922859191895, |
|
"learning_rate": 7.122507122507123e-07, |
|
"loss": 0.1048, |
|
"step": 7700 |
|
}, |
|
{ |
|
"epoch": 98.22, |
|
"grad_norm": 2.839982748031616, |
|
"learning_rate": 6.41025641025641e-07, |
|
"loss": 0.1044, |
|
"step": 7710 |
|
}, |
|
{ |
|
"epoch": 98.34, |
|
"grad_norm": 1.9306455850601196, |
|
"learning_rate": 5.698005698005699e-07, |
|
"loss": 0.0911, |
|
"step": 7720 |
|
}, |
|
{ |
|
"epoch": 98.47, |
|
"grad_norm": 2.3731818199157715, |
|
"learning_rate": 4.985754985754986e-07, |
|
"loss": 0.0971, |
|
"step": 7730 |
|
}, |
|
{ |
|
"epoch": 98.6, |
|
"grad_norm": 3.8493146896362305, |
|
"learning_rate": 4.273504273504274e-07, |
|
"loss": 0.0941, |
|
"step": 7740 |
|
}, |
|
{ |
|
"epoch": 98.73, |
|
"grad_norm": 2.2244791984558105, |
|
"learning_rate": 3.5612535612535615e-07, |
|
"loss": 0.0998, |
|
"step": 7750 |
|
}, |
|
{ |
|
"epoch": 98.85, |
|
"grad_norm": 2.7025341987609863, |
|
"learning_rate": 2.8490028490028494e-07, |
|
"loss": 0.1015, |
|
"step": 7760 |
|
}, |
|
{ |
|
"epoch": 98.98, |
|
"grad_norm": 2.6602606773376465, |
|
"learning_rate": 2.136752136752137e-07, |
|
"loss": 0.1032, |
|
"step": 7770 |
|
}, |
|
{ |
|
"epoch": 98.99, |
|
"eval_accuracy": 0.981390134529148, |
|
"eval_loss": 0.06043354794383049, |
|
"eval_runtime": 10.296, |
|
"eval_samples_per_second": 433.177, |
|
"eval_steps_per_second": 3.399, |
|
"step": 7771 |
|
}, |
|
{ |
|
"epoch": 99.11, |
|
"grad_norm": 2.8605153560638428, |
|
"learning_rate": 1.4245014245014247e-07, |
|
"loss": 0.1033, |
|
"step": 7780 |
|
}, |
|
{ |
|
"epoch": 99.24, |
|
"grad_norm": 1.583977460861206, |
|
"learning_rate": 7.122507122507124e-08, |
|
"loss": 0.0991, |
|
"step": 7790 |
|
}, |
|
{ |
|
"epoch": 99.36, |
|
"grad_norm": 2.7007992267608643, |
|
"learning_rate": 0.0, |
|
"loss": 0.0925, |
|
"step": 7800 |
|
}, |
|
{ |
|
"epoch": 99.36, |
|
"eval_accuracy": 0.9816143497757848, |
|
"eval_loss": 0.06034744158387184, |
|
"eval_runtime": 10.4984, |
|
"eval_samples_per_second": 424.827, |
|
"eval_steps_per_second": 3.334, |
|
"step": 7800 |
|
}, |
|
{ |
|
"epoch": 99.36, |
|
"step": 7800, |
|
"total_flos": 9.913968910687958e+19, |
|
"train_loss": 0.21531945743621925, |
|
"train_runtime": 13739.5225, |
|
"train_samples_per_second": 292.15, |
|
"train_steps_per_second": 0.568 |
|
} |
|
], |
|
"logging_steps": 10, |
|
"max_steps": 7800, |
|
"num_input_tokens_seen": 0, |
|
"num_train_epochs": 100, |
|
"save_steps": 500, |
|
"total_flos": 9.913968910687958e+19, |
|
"train_batch_size": 128, |
|
"trial_name": null, |
|
"trial_params": null |
|
} |
|