|
{ |
|
"best_metric": 0.5603644646924829, |
|
"best_model_checkpoint": "videomae-base-fatigue-detection-full/checkpoint-1800", |
|
"epoch": 2.3325193325193325, |
|
"eval_steps": 200, |
|
"global_step": 2457, |
|
"is_hyper_param_search": false, |
|
"is_local_process_zero": true, |
|
"is_world_process_zero": true, |
|
"log_history": [ |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 2.0325203252032523e-06, |
|
"loss": 1.0932, |
|
"step": 10 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 4.0650406504065046e-06, |
|
"loss": 1.071, |
|
"step": 20 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 6.0975609756097564e-06, |
|
"loss": 1.0823, |
|
"step": 30 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 8.130081300813009e-06, |
|
"loss": 1.065, |
|
"step": 40 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 1.016260162601626e-05, |
|
"loss": 0.9151, |
|
"step": 50 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 1.2195121951219513e-05, |
|
"loss": 0.9317, |
|
"step": 60 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 1.4227642276422764e-05, |
|
"loss": 0.7747, |
|
"step": 70 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 1.6260162601626018e-05, |
|
"loss": 0.7065, |
|
"step": 80 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 1.8292682926829268e-05, |
|
"loss": 0.6079, |
|
"step": 90 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 2.032520325203252e-05, |
|
"loss": 0.4306, |
|
"step": 100 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 2.2357723577235773e-05, |
|
"loss": 0.5325, |
|
"step": 110 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 2.4390243902439026e-05, |
|
"loss": 0.2726, |
|
"step": 120 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 2.642276422764228e-05, |
|
"loss": 0.3247, |
|
"step": 130 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 2.8455284552845528e-05, |
|
"loss": 0.2971, |
|
"step": 140 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 3.048780487804878e-05, |
|
"loss": 0.2515, |
|
"step": 150 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 3.2520325203252037e-05, |
|
"loss": 0.2845, |
|
"step": 160 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 3.4552845528455286e-05, |
|
"loss": 0.1247, |
|
"step": 170 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 3.6585365853658535e-05, |
|
"loss": 0.0741, |
|
"step": 180 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 3.861788617886179e-05, |
|
"loss": 0.248, |
|
"step": 190 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 4.065040650406504e-05, |
|
"loss": 0.0402, |
|
"step": 200 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"eval_accuracy": 0.4646924829157175, |
|
"eval_loss": 1.2843588590621948, |
|
"eval_runtime": 1127.5818, |
|
"eval_samples_per_second": 0.389, |
|
"eval_steps_per_second": 0.098, |
|
"step": 200 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 4.26829268292683e-05, |
|
"loss": 0.0652, |
|
"step": 210 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 4.4715447154471546e-05, |
|
"loss": 0.0827, |
|
"step": 220 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 4.6747967479674795e-05, |
|
"loss": 0.1339, |
|
"step": 230 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 4.878048780487805e-05, |
|
"loss": 0.0576, |
|
"step": 240 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 4.9909543193125285e-05, |
|
"loss": 0.0135, |
|
"step": 250 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 4.9683401175938496e-05, |
|
"loss": 0.0197, |
|
"step": 260 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 4.94572591587517e-05, |
|
"loss": 0.0287, |
|
"step": 270 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 4.9231117141564905e-05, |
|
"loss": 0.0066, |
|
"step": 280 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 4.900497512437811e-05, |
|
"loss": 0.0256, |
|
"step": 290 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 4.877883310719132e-05, |
|
"loss": 0.0016, |
|
"step": 300 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 4.8552691090004524e-05, |
|
"loss": 0.0036, |
|
"step": 310 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 4.8326549072817735e-05, |
|
"loss": 0.0039, |
|
"step": 320 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 4.810040705563094e-05, |
|
"loss": 0.0014, |
|
"step": 330 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 4.787426503844414e-05, |
|
"loss": 0.0012, |
|
"step": 340 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 4.7648123021257354e-05, |
|
"loss": 0.0007, |
|
"step": 350 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 4.742198100407056e-05, |
|
"loss": 0.0278, |
|
"step": 360 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 4.719583898688377e-05, |
|
"loss": 0.0004, |
|
"step": 370 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 4.696969696969697e-05, |
|
"loss": 0.0004, |
|
"step": 380 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 4.674355495251018e-05, |
|
"loss": 0.0004, |
|
"step": 390 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 4.651741293532338e-05, |
|
"loss": 0.0003, |
|
"step": 400 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"eval_accuracy": 0.46924829157175396, |
|
"eval_loss": 1.65602445602417, |
|
"eval_runtime": 1082.8751, |
|
"eval_samples_per_second": 0.405, |
|
"eval_steps_per_second": 0.102, |
|
"step": 400 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 4.629127091813659e-05, |
|
"loss": 0.0003, |
|
"step": 410 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 4.60651289009498e-05, |
|
"loss": 0.0003, |
|
"step": 420 |
|
}, |
|
{ |
|
"epoch": 0.18, |
|
"learning_rate": 4.583898688376301e-05, |
|
"loss": 0.0003, |
|
"step": 430 |
|
}, |
|
{ |
|
"epoch": 0.18, |
|
"learning_rate": 4.561284486657621e-05, |
|
"loss": 0.0003, |
|
"step": 440 |
|
}, |
|
{ |
|
"epoch": 0.18, |
|
"learning_rate": 4.5386702849389416e-05, |
|
"loss": 0.0003, |
|
"step": 450 |
|
}, |
|
{ |
|
"epoch": 0.19, |
|
"learning_rate": 4.516056083220263e-05, |
|
"loss": 0.0003, |
|
"step": 460 |
|
}, |
|
{ |
|
"epoch": 0.19, |
|
"learning_rate": 4.493441881501583e-05, |
|
"loss": 0.0002, |
|
"step": 470 |
|
}, |
|
{ |
|
"epoch": 0.2, |
|
"learning_rate": 4.470827679782904e-05, |
|
"loss": 0.0002, |
|
"step": 480 |
|
}, |
|
{ |
|
"epoch": 0.2, |
|
"learning_rate": 4.4482134780642246e-05, |
|
"loss": 0.0002, |
|
"step": 490 |
|
}, |
|
{ |
|
"epoch": 0.2, |
|
"learning_rate": 4.425599276345545e-05, |
|
"loss": 0.0002, |
|
"step": 500 |
|
}, |
|
{ |
|
"epoch": 0.21, |
|
"learning_rate": 4.402985074626866e-05, |
|
"loss": 0.0002, |
|
"step": 510 |
|
}, |
|
{ |
|
"epoch": 0.21, |
|
"learning_rate": 4.3803708729081865e-05, |
|
"loss": 0.0002, |
|
"step": 520 |
|
}, |
|
{ |
|
"epoch": 0.22, |
|
"learning_rate": 4.3577566711895076e-05, |
|
"loss": 0.0002, |
|
"step": 530 |
|
}, |
|
{ |
|
"epoch": 0.22, |
|
"learning_rate": 4.335142469470828e-05, |
|
"loss": 0.0002, |
|
"step": 540 |
|
}, |
|
{ |
|
"epoch": 0.22, |
|
"learning_rate": 4.3125282677521484e-05, |
|
"loss": 0.0001, |
|
"step": 550 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"learning_rate": 4.289914066033469e-05, |
|
"loss": 0.0001, |
|
"step": 560 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"learning_rate": 4.26729986431479e-05, |
|
"loss": 0.0001, |
|
"step": 570 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"learning_rate": 4.2446856625961104e-05, |
|
"loss": 0.0001, |
|
"step": 580 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"learning_rate": 4.2220714608774314e-05, |
|
"loss": 0.0002, |
|
"step": 590 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"learning_rate": 4.199457259158752e-05, |
|
"loss": 0.0001, |
|
"step": 600 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"eval_accuracy": 0.4510250569476082, |
|
"eval_loss": 2.0519042015075684, |
|
"eval_runtime": 1093.6378, |
|
"eval_samples_per_second": 0.401, |
|
"eval_steps_per_second": 0.101, |
|
"step": 600 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"learning_rate": 4.176843057440072e-05, |
|
"loss": 0.0002, |
|
"step": 610 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"learning_rate": 4.1542288557213934e-05, |
|
"loss": 0.0001, |
|
"step": 620 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"learning_rate": 4.131614654002714e-05, |
|
"loss": 0.0001, |
|
"step": 630 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"learning_rate": 4.109000452284035e-05, |
|
"loss": 0.0001, |
|
"step": 640 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"learning_rate": 4.086386250565355e-05, |
|
"loss": 0.0001, |
|
"step": 650 |
|
}, |
|
{ |
|
"epoch": 0.27, |
|
"learning_rate": 4.063772048846676e-05, |
|
"loss": 0.0001, |
|
"step": 660 |
|
}, |
|
{ |
|
"epoch": 0.27, |
|
"learning_rate": 4.041157847127997e-05, |
|
"loss": 0.0001, |
|
"step": 670 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"learning_rate": 4.018543645409317e-05, |
|
"loss": 0.0004, |
|
"step": 680 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"learning_rate": 3.995929443690638e-05, |
|
"loss": 0.0002, |
|
"step": 690 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"learning_rate": 3.973315241971959e-05, |
|
"loss": 0.0003, |
|
"step": 700 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"learning_rate": 3.950701040253279e-05, |
|
"loss": 0.0001, |
|
"step": 710 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"learning_rate": 3.9280868385345995e-05, |
|
"loss": 0.0001, |
|
"step": 720 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"learning_rate": 3.9054726368159206e-05, |
|
"loss": 0.0002, |
|
"step": 730 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"learning_rate": 3.882858435097241e-05, |
|
"loss": 0.0001, |
|
"step": 740 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"learning_rate": 3.860244233378562e-05, |
|
"loss": 0.0001, |
|
"step": 750 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"learning_rate": 3.8376300316598826e-05, |
|
"loss": 0.0001, |
|
"step": 760 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"learning_rate": 3.815015829941203e-05, |
|
"loss": 0.0001, |
|
"step": 770 |
|
}, |
|
{ |
|
"epoch": 0.32, |
|
"learning_rate": 3.792401628222524e-05, |
|
"loss": 0.0001, |
|
"step": 780 |
|
}, |
|
{ |
|
"epoch": 0.32, |
|
"learning_rate": 3.7697874265038445e-05, |
|
"loss": 0.0001, |
|
"step": 790 |
|
}, |
|
{ |
|
"epoch": 0.33, |
|
"learning_rate": 3.7471732247851656e-05, |
|
"loss": 0.0001, |
|
"step": 800 |
|
}, |
|
{ |
|
"epoch": 0.33, |
|
"eval_accuracy": 0.4396355353075171, |
|
"eval_loss": 1.6676476001739502, |
|
"eval_runtime": 1094.6937, |
|
"eval_samples_per_second": 0.401, |
|
"eval_steps_per_second": 0.1, |
|
"step": 800 |
|
}, |
|
{ |
|
"epoch": 0.33, |
|
"learning_rate": 3.724559023066486e-05, |
|
"loss": 0.0001, |
|
"step": 810 |
|
}, |
|
{ |
|
"epoch": 0.33, |
|
"learning_rate": 3.7019448213478064e-05, |
|
"loss": 0.0001, |
|
"step": 820 |
|
}, |
|
{ |
|
"epoch": 1.0, |
|
"learning_rate": 3.6793306196291275e-05, |
|
"loss": 0.0001, |
|
"step": 830 |
|
}, |
|
{ |
|
"epoch": 1.01, |
|
"learning_rate": 3.656716417910448e-05, |
|
"loss": 0.0001, |
|
"step": 840 |
|
}, |
|
{ |
|
"epoch": 1.01, |
|
"learning_rate": 3.634102216191769e-05, |
|
"loss": 0.0001, |
|
"step": 850 |
|
}, |
|
{ |
|
"epoch": 1.02, |
|
"learning_rate": 3.6114880144730894e-05, |
|
"loss": 0.0001, |
|
"step": 860 |
|
}, |
|
{ |
|
"epoch": 1.02, |
|
"learning_rate": 3.58887381275441e-05, |
|
"loss": 0.0001, |
|
"step": 870 |
|
}, |
|
{ |
|
"epoch": 1.02, |
|
"learning_rate": 3.56625961103573e-05, |
|
"loss": 0.0001, |
|
"step": 880 |
|
}, |
|
{ |
|
"epoch": 1.03, |
|
"learning_rate": 3.543645409317051e-05, |
|
"loss": 0.0001, |
|
"step": 890 |
|
}, |
|
{ |
|
"epoch": 1.03, |
|
"learning_rate": 3.521031207598372e-05, |
|
"loss": 0.0001, |
|
"step": 900 |
|
}, |
|
{ |
|
"epoch": 1.04, |
|
"learning_rate": 3.498417005879693e-05, |
|
"loss": 0.0001, |
|
"step": 910 |
|
}, |
|
{ |
|
"epoch": 1.04, |
|
"learning_rate": 3.475802804161013e-05, |
|
"loss": 0.0001, |
|
"step": 920 |
|
}, |
|
{ |
|
"epoch": 1.04, |
|
"learning_rate": 3.453188602442334e-05, |
|
"loss": 0.0001, |
|
"step": 930 |
|
}, |
|
{ |
|
"epoch": 1.05, |
|
"learning_rate": 3.430574400723655e-05, |
|
"loss": 0.0001, |
|
"step": 940 |
|
}, |
|
{ |
|
"epoch": 1.05, |
|
"learning_rate": 3.407960199004975e-05, |
|
"loss": 0.0001, |
|
"step": 950 |
|
}, |
|
{ |
|
"epoch": 1.06, |
|
"learning_rate": 3.385345997286296e-05, |
|
"loss": 0.0001, |
|
"step": 960 |
|
}, |
|
{ |
|
"epoch": 1.06, |
|
"learning_rate": 3.362731795567617e-05, |
|
"loss": 0.0001, |
|
"step": 970 |
|
}, |
|
{ |
|
"epoch": 1.07, |
|
"learning_rate": 3.340117593848937e-05, |
|
"loss": 0.0001, |
|
"step": 980 |
|
}, |
|
{ |
|
"epoch": 1.07, |
|
"learning_rate": 3.3175033921302575e-05, |
|
"loss": 0.0001, |
|
"step": 990 |
|
}, |
|
{ |
|
"epoch": 1.07, |
|
"learning_rate": 3.2948891904115786e-05, |
|
"loss": 0.0001, |
|
"step": 1000 |
|
}, |
|
{ |
|
"epoch": 1.07, |
|
"eval_accuracy": 0.4305239179954442, |
|
"eval_loss": 2.105757474899292, |
|
"eval_runtime": 1084.6609, |
|
"eval_samples_per_second": 0.405, |
|
"eval_steps_per_second": 0.101, |
|
"step": 1000 |
|
}, |
|
{ |
|
"epoch": 1.08, |
|
"learning_rate": 3.2722749886929e-05, |
|
"loss": 0.0001, |
|
"step": 1010 |
|
}, |
|
{ |
|
"epoch": 1.08, |
|
"learning_rate": 3.24966078697422e-05, |
|
"loss": 0.0001, |
|
"step": 1020 |
|
}, |
|
{ |
|
"epoch": 1.09, |
|
"learning_rate": 3.2270465852555405e-05, |
|
"loss": 0.0001, |
|
"step": 1030 |
|
}, |
|
{ |
|
"epoch": 1.09, |
|
"learning_rate": 3.204432383536861e-05, |
|
"loss": 0.0001, |
|
"step": 1040 |
|
}, |
|
{ |
|
"epoch": 1.09, |
|
"learning_rate": 3.181818181818182e-05, |
|
"loss": 0.0001, |
|
"step": 1050 |
|
}, |
|
{ |
|
"epoch": 1.1, |
|
"learning_rate": 3.1592039800995024e-05, |
|
"loss": 0.0001, |
|
"step": 1060 |
|
}, |
|
{ |
|
"epoch": 1.1, |
|
"learning_rate": 3.1365897783808235e-05, |
|
"loss": 0.0001, |
|
"step": 1070 |
|
}, |
|
{ |
|
"epoch": 1.11, |
|
"learning_rate": 3.113975576662144e-05, |
|
"loss": 0.0001, |
|
"step": 1080 |
|
}, |
|
{ |
|
"epoch": 1.11, |
|
"learning_rate": 3.0913613749434644e-05, |
|
"loss": 0.0001, |
|
"step": 1090 |
|
}, |
|
{ |
|
"epoch": 1.11, |
|
"learning_rate": 3.0687471732247855e-05, |
|
"loss": 0.0001, |
|
"step": 1100 |
|
}, |
|
{ |
|
"epoch": 1.12, |
|
"learning_rate": 3.046132971506106e-05, |
|
"loss": 0.0001, |
|
"step": 1110 |
|
}, |
|
{ |
|
"epoch": 1.12, |
|
"learning_rate": 3.0235187697874266e-05, |
|
"loss": 0.0001, |
|
"step": 1120 |
|
}, |
|
{ |
|
"epoch": 1.13, |
|
"learning_rate": 3.0009045680687474e-05, |
|
"loss": 0.0001, |
|
"step": 1130 |
|
}, |
|
{ |
|
"epoch": 1.13, |
|
"learning_rate": 2.9782903663500678e-05, |
|
"loss": 0.0001, |
|
"step": 1140 |
|
}, |
|
{ |
|
"epoch": 1.13, |
|
"learning_rate": 2.9556761646313885e-05, |
|
"loss": 0.0001, |
|
"step": 1150 |
|
}, |
|
{ |
|
"epoch": 1.14, |
|
"learning_rate": 2.9330619629127093e-05, |
|
"loss": 0.0001, |
|
"step": 1160 |
|
}, |
|
{ |
|
"epoch": 1.14, |
|
"learning_rate": 2.91044776119403e-05, |
|
"loss": 0.0, |
|
"step": 1170 |
|
}, |
|
{ |
|
"epoch": 1.15, |
|
"learning_rate": 2.8878335594753508e-05, |
|
"loss": 0.0001, |
|
"step": 1180 |
|
}, |
|
{ |
|
"epoch": 1.15, |
|
"learning_rate": 2.865219357756671e-05, |
|
"loss": 0.0, |
|
"step": 1190 |
|
}, |
|
{ |
|
"epoch": 1.15, |
|
"learning_rate": 2.842605156037992e-05, |
|
"loss": 0.0001, |
|
"step": 1200 |
|
}, |
|
{ |
|
"epoch": 1.15, |
|
"eval_accuracy": 0.4419134396355353, |
|
"eval_loss": 2.165936231613159, |
|
"eval_runtime": 1107.6493, |
|
"eval_samples_per_second": 0.396, |
|
"eval_steps_per_second": 0.099, |
|
"step": 1200 |
|
}, |
|
{ |
|
"epoch": 1.16, |
|
"learning_rate": 2.8199909543193127e-05, |
|
"loss": 0.0001, |
|
"step": 1210 |
|
}, |
|
{ |
|
"epoch": 1.16, |
|
"learning_rate": 2.7973767526006335e-05, |
|
"loss": 0.0001, |
|
"step": 1220 |
|
}, |
|
{ |
|
"epoch": 1.17, |
|
"learning_rate": 2.7747625508819542e-05, |
|
"loss": 0.0001, |
|
"step": 1230 |
|
}, |
|
{ |
|
"epoch": 1.17, |
|
"learning_rate": 2.752148349163275e-05, |
|
"loss": 0.0, |
|
"step": 1240 |
|
}, |
|
{ |
|
"epoch": 1.18, |
|
"learning_rate": 2.729534147444595e-05, |
|
"loss": 0.0, |
|
"step": 1250 |
|
}, |
|
{ |
|
"epoch": 1.18, |
|
"learning_rate": 2.7069199457259158e-05, |
|
"loss": 0.0, |
|
"step": 1260 |
|
}, |
|
{ |
|
"epoch": 1.18, |
|
"learning_rate": 2.6843057440072366e-05, |
|
"loss": 0.0001, |
|
"step": 1270 |
|
}, |
|
{ |
|
"epoch": 1.19, |
|
"learning_rate": 2.6616915422885573e-05, |
|
"loss": 0.0, |
|
"step": 1280 |
|
}, |
|
{ |
|
"epoch": 1.19, |
|
"learning_rate": 2.639077340569878e-05, |
|
"loss": 0.0, |
|
"step": 1290 |
|
}, |
|
{ |
|
"epoch": 1.2, |
|
"learning_rate": 2.6164631388511985e-05, |
|
"loss": 0.0, |
|
"step": 1300 |
|
}, |
|
{ |
|
"epoch": 1.2, |
|
"learning_rate": 2.5938489371325192e-05, |
|
"loss": 0.0001, |
|
"step": 1310 |
|
}, |
|
{ |
|
"epoch": 1.2, |
|
"learning_rate": 2.57123473541384e-05, |
|
"loss": 0.0001, |
|
"step": 1320 |
|
}, |
|
{ |
|
"epoch": 1.21, |
|
"learning_rate": 2.5486205336951608e-05, |
|
"loss": 0.0, |
|
"step": 1330 |
|
}, |
|
{ |
|
"epoch": 1.21, |
|
"learning_rate": 2.5260063319764815e-05, |
|
"loss": 0.0, |
|
"step": 1340 |
|
}, |
|
{ |
|
"epoch": 1.22, |
|
"learning_rate": 2.5033921302578023e-05, |
|
"loss": 0.0, |
|
"step": 1350 |
|
}, |
|
{ |
|
"epoch": 1.22, |
|
"learning_rate": 2.4807779285391227e-05, |
|
"loss": 0.0, |
|
"step": 1360 |
|
}, |
|
{ |
|
"epoch": 1.22, |
|
"learning_rate": 2.4581637268204434e-05, |
|
"loss": 0.0, |
|
"step": 1370 |
|
}, |
|
{ |
|
"epoch": 1.23, |
|
"learning_rate": 2.4355495251017642e-05, |
|
"loss": 0.0, |
|
"step": 1380 |
|
}, |
|
{ |
|
"epoch": 1.23, |
|
"learning_rate": 2.412935323383085e-05, |
|
"loss": 0.0, |
|
"step": 1390 |
|
}, |
|
{ |
|
"epoch": 1.24, |
|
"learning_rate": 2.3903211216644054e-05, |
|
"loss": 0.0, |
|
"step": 1400 |
|
}, |
|
{ |
|
"epoch": 1.24, |
|
"eval_accuracy": 0.43735763097949887, |
|
"eval_loss": 2.219512939453125, |
|
"eval_runtime": 1089.3185, |
|
"eval_samples_per_second": 0.403, |
|
"eval_steps_per_second": 0.101, |
|
"step": 1400 |
|
}, |
|
{ |
|
"epoch": 1.24, |
|
"learning_rate": 2.367706919945726e-05, |
|
"loss": 0.0, |
|
"step": 1410 |
|
}, |
|
{ |
|
"epoch": 1.24, |
|
"learning_rate": 2.3450927182270465e-05, |
|
"loss": 0.0, |
|
"step": 1420 |
|
}, |
|
{ |
|
"epoch": 1.25, |
|
"learning_rate": 2.3224785165083673e-05, |
|
"loss": 0.0, |
|
"step": 1430 |
|
}, |
|
{ |
|
"epoch": 1.25, |
|
"learning_rate": 2.299864314789688e-05, |
|
"loss": 0.0, |
|
"step": 1440 |
|
}, |
|
{ |
|
"epoch": 1.26, |
|
"learning_rate": 2.2772501130710084e-05, |
|
"loss": 0.0, |
|
"step": 1450 |
|
}, |
|
{ |
|
"epoch": 1.26, |
|
"learning_rate": 2.2546359113523295e-05, |
|
"loss": 0.0, |
|
"step": 1460 |
|
}, |
|
{ |
|
"epoch": 1.26, |
|
"learning_rate": 2.2320217096336503e-05, |
|
"loss": 0.0, |
|
"step": 1470 |
|
}, |
|
{ |
|
"epoch": 1.27, |
|
"learning_rate": 2.2094075079149707e-05, |
|
"loss": 0.0, |
|
"step": 1480 |
|
}, |
|
{ |
|
"epoch": 1.27, |
|
"learning_rate": 2.1867933061962915e-05, |
|
"loss": 0.0, |
|
"step": 1490 |
|
}, |
|
{ |
|
"epoch": 1.28, |
|
"learning_rate": 2.164179104477612e-05, |
|
"loss": 0.0, |
|
"step": 1500 |
|
}, |
|
{ |
|
"epoch": 1.28, |
|
"learning_rate": 2.1415649027589326e-05, |
|
"loss": 0.0, |
|
"step": 1510 |
|
}, |
|
{ |
|
"epoch": 1.28, |
|
"learning_rate": 2.1189507010402534e-05, |
|
"loss": 0.0, |
|
"step": 1520 |
|
}, |
|
{ |
|
"epoch": 1.29, |
|
"learning_rate": 2.0963364993215738e-05, |
|
"loss": 0.0, |
|
"step": 1530 |
|
}, |
|
{ |
|
"epoch": 1.29, |
|
"learning_rate": 2.073722297602895e-05, |
|
"loss": 0.0, |
|
"step": 1540 |
|
}, |
|
{ |
|
"epoch": 1.3, |
|
"learning_rate": 2.0511080958842156e-05, |
|
"loss": 0.0, |
|
"step": 1550 |
|
}, |
|
{ |
|
"epoch": 1.3, |
|
"learning_rate": 2.028493894165536e-05, |
|
"loss": 0.0, |
|
"step": 1560 |
|
}, |
|
{ |
|
"epoch": 1.31, |
|
"learning_rate": 2.0058796924468568e-05, |
|
"loss": 0.0, |
|
"step": 1570 |
|
}, |
|
{ |
|
"epoch": 1.31, |
|
"learning_rate": 1.9832654907281776e-05, |
|
"loss": 0.0, |
|
"step": 1580 |
|
}, |
|
{ |
|
"epoch": 1.31, |
|
"learning_rate": 1.960651289009498e-05, |
|
"loss": 0.0, |
|
"step": 1590 |
|
}, |
|
{ |
|
"epoch": 1.32, |
|
"learning_rate": 1.9380370872908187e-05, |
|
"loss": 0.0, |
|
"step": 1600 |
|
}, |
|
{ |
|
"epoch": 1.32, |
|
"eval_accuracy": 0.43507972665148065, |
|
"eval_loss": 2.3093459606170654, |
|
"eval_runtime": 1108.3747, |
|
"eval_samples_per_second": 0.396, |
|
"eval_steps_per_second": 0.099, |
|
"step": 1600 |
|
}, |
|
{ |
|
"epoch": 1.32, |
|
"learning_rate": 1.915422885572139e-05, |
|
"loss": 0.0, |
|
"step": 1610 |
|
}, |
|
{ |
|
"epoch": 1.33, |
|
"learning_rate": 1.89280868385346e-05, |
|
"loss": 0.004, |
|
"step": 1620 |
|
}, |
|
{ |
|
"epoch": 1.33, |
|
"learning_rate": 1.870194482134781e-05, |
|
"loss": 0.0117, |
|
"step": 1630 |
|
}, |
|
{ |
|
"epoch": 1.33, |
|
"learning_rate": 1.8475802804161014e-05, |
|
"loss": 0.0001, |
|
"step": 1640 |
|
}, |
|
{ |
|
"epoch": 2.0, |
|
"learning_rate": 1.824966078697422e-05, |
|
"loss": 0.0002, |
|
"step": 1650 |
|
}, |
|
{ |
|
"epoch": 2.01, |
|
"learning_rate": 1.802351876978743e-05, |
|
"loss": 0.0001, |
|
"step": 1660 |
|
}, |
|
{ |
|
"epoch": 2.01, |
|
"learning_rate": 1.7797376752600633e-05, |
|
"loss": 0.0001, |
|
"step": 1670 |
|
}, |
|
{ |
|
"epoch": 2.02, |
|
"learning_rate": 1.757123473541384e-05, |
|
"loss": 0.0001, |
|
"step": 1680 |
|
}, |
|
{ |
|
"epoch": 2.02, |
|
"learning_rate": 1.7345092718227045e-05, |
|
"loss": 0.0001, |
|
"step": 1690 |
|
}, |
|
{ |
|
"epoch": 2.02, |
|
"learning_rate": 1.7118950701040252e-05, |
|
"loss": 0.0001, |
|
"step": 1700 |
|
}, |
|
{ |
|
"epoch": 2.03, |
|
"learning_rate": 1.689280868385346e-05, |
|
"loss": 0.0001, |
|
"step": 1710 |
|
}, |
|
{ |
|
"epoch": 2.03, |
|
"learning_rate": 1.6666666666666667e-05, |
|
"loss": 0.0, |
|
"step": 1720 |
|
}, |
|
{ |
|
"epoch": 2.04, |
|
"learning_rate": 1.6440524649479875e-05, |
|
"loss": 0.0, |
|
"step": 1730 |
|
}, |
|
{ |
|
"epoch": 2.04, |
|
"learning_rate": 1.6214382632293083e-05, |
|
"loss": 0.0, |
|
"step": 1740 |
|
}, |
|
{ |
|
"epoch": 2.04, |
|
"learning_rate": 1.5988240615106287e-05, |
|
"loss": 0.0002, |
|
"step": 1750 |
|
}, |
|
{ |
|
"epoch": 2.05, |
|
"learning_rate": 1.5762098597919494e-05, |
|
"loss": 0.0, |
|
"step": 1760 |
|
}, |
|
{ |
|
"epoch": 2.05, |
|
"learning_rate": 1.55359565807327e-05, |
|
"loss": 0.0001, |
|
"step": 1770 |
|
}, |
|
{ |
|
"epoch": 2.06, |
|
"learning_rate": 1.5309814563545906e-05, |
|
"loss": 0.0001, |
|
"step": 1780 |
|
}, |
|
{ |
|
"epoch": 2.06, |
|
"learning_rate": 1.5083672546359115e-05, |
|
"loss": 0.0, |
|
"step": 1790 |
|
}, |
|
{ |
|
"epoch": 2.07, |
|
"learning_rate": 1.485753052917232e-05, |
|
"loss": 0.0013, |
|
"step": 1800 |
|
}, |
|
{ |
|
"epoch": 2.07, |
|
"eval_accuracy": 0.5603644646924829, |
|
"eval_loss": 2.1500625610351562, |
|
"eval_runtime": 1095.3897, |
|
"eval_samples_per_second": 0.401, |
|
"eval_steps_per_second": 0.1, |
|
"step": 1800 |
|
}, |
|
{ |
|
"epoch": 2.07, |
|
"learning_rate": 1.4631388511985527e-05, |
|
"loss": 0.0001, |
|
"step": 1810 |
|
}, |
|
{ |
|
"epoch": 2.07, |
|
"learning_rate": 1.4405246494798736e-05, |
|
"loss": 0.0001, |
|
"step": 1820 |
|
}, |
|
{ |
|
"epoch": 2.08, |
|
"learning_rate": 1.417910447761194e-05, |
|
"loss": 0.0001, |
|
"step": 1830 |
|
}, |
|
{ |
|
"epoch": 2.08, |
|
"learning_rate": 1.3952962460425148e-05, |
|
"loss": 0.0001, |
|
"step": 1840 |
|
}, |
|
{ |
|
"epoch": 2.09, |
|
"learning_rate": 1.3726820443238355e-05, |
|
"loss": 0.003, |
|
"step": 1850 |
|
}, |
|
{ |
|
"epoch": 2.09, |
|
"learning_rate": 1.3500678426051561e-05, |
|
"loss": 0.0002, |
|
"step": 1860 |
|
}, |
|
{ |
|
"epoch": 2.09, |
|
"learning_rate": 1.3274536408864769e-05, |
|
"loss": 0.0001, |
|
"step": 1870 |
|
}, |
|
{ |
|
"epoch": 2.1, |
|
"learning_rate": 1.3048394391677973e-05, |
|
"loss": 0.0001, |
|
"step": 1880 |
|
}, |
|
{ |
|
"epoch": 2.1, |
|
"learning_rate": 1.282225237449118e-05, |
|
"loss": 0.0001, |
|
"step": 1890 |
|
}, |
|
{ |
|
"epoch": 2.11, |
|
"learning_rate": 1.259611035730439e-05, |
|
"loss": 0.0001, |
|
"step": 1900 |
|
}, |
|
{ |
|
"epoch": 2.11, |
|
"learning_rate": 1.2369968340117595e-05, |
|
"loss": 0.0001, |
|
"step": 1910 |
|
}, |
|
{ |
|
"epoch": 2.11, |
|
"learning_rate": 1.2143826322930801e-05, |
|
"loss": 0.0001, |
|
"step": 1920 |
|
}, |
|
{ |
|
"epoch": 2.12, |
|
"learning_rate": 1.1917684305744007e-05, |
|
"loss": 0.0001, |
|
"step": 1930 |
|
}, |
|
{ |
|
"epoch": 2.12, |
|
"learning_rate": 1.1691542288557215e-05, |
|
"loss": 0.0001, |
|
"step": 1940 |
|
}, |
|
{ |
|
"epoch": 2.13, |
|
"learning_rate": 1.1465400271370422e-05, |
|
"loss": 0.0001, |
|
"step": 1950 |
|
}, |
|
{ |
|
"epoch": 2.13, |
|
"learning_rate": 1.1239258254183628e-05, |
|
"loss": 0.0001, |
|
"step": 1960 |
|
}, |
|
{ |
|
"epoch": 2.13, |
|
"learning_rate": 1.1013116236996834e-05, |
|
"loss": 0.0, |
|
"step": 1970 |
|
}, |
|
{ |
|
"epoch": 2.14, |
|
"learning_rate": 1.0786974219810041e-05, |
|
"loss": 0.0001, |
|
"step": 1980 |
|
}, |
|
{ |
|
"epoch": 2.14, |
|
"learning_rate": 1.0560832202623249e-05, |
|
"loss": 0.0001, |
|
"step": 1990 |
|
}, |
|
{ |
|
"epoch": 2.15, |
|
"learning_rate": 1.0334690185436455e-05, |
|
"loss": 0.0, |
|
"step": 2000 |
|
}, |
|
{ |
|
"epoch": 2.15, |
|
"eval_accuracy": 0.5148063781321185, |
|
"eval_loss": 2.2250020503997803, |
|
"eval_runtime": 1087.2971, |
|
"eval_samples_per_second": 0.404, |
|
"eval_steps_per_second": 0.101, |
|
"step": 2000 |
|
}, |
|
{ |
|
"epoch": 2.15, |
|
"learning_rate": 1.010854816824966e-05, |
|
"loss": 0.0, |
|
"step": 2010 |
|
}, |
|
{ |
|
"epoch": 2.15, |
|
"learning_rate": 9.882406151062868e-06, |
|
"loss": 0.0, |
|
"step": 2020 |
|
}, |
|
{ |
|
"epoch": 2.16, |
|
"learning_rate": 9.656264133876076e-06, |
|
"loss": 0.0, |
|
"step": 2030 |
|
}, |
|
{ |
|
"epoch": 2.16, |
|
"learning_rate": 9.430122116689281e-06, |
|
"loss": 0.0, |
|
"step": 2040 |
|
}, |
|
{ |
|
"epoch": 2.17, |
|
"learning_rate": 9.203980099502487e-06, |
|
"loss": 0.0, |
|
"step": 2050 |
|
}, |
|
{ |
|
"epoch": 2.17, |
|
"learning_rate": 8.977838082315695e-06, |
|
"loss": 0.0, |
|
"step": 2060 |
|
}, |
|
{ |
|
"epoch": 2.18, |
|
"learning_rate": 8.751696065128902e-06, |
|
"loss": 0.0, |
|
"step": 2070 |
|
}, |
|
{ |
|
"epoch": 2.18, |
|
"learning_rate": 8.525554047942108e-06, |
|
"loss": 0.0, |
|
"step": 2080 |
|
}, |
|
{ |
|
"epoch": 2.18, |
|
"learning_rate": 8.299412030755314e-06, |
|
"loss": 0.0003, |
|
"step": 2090 |
|
}, |
|
{ |
|
"epoch": 2.19, |
|
"learning_rate": 8.073270013568522e-06, |
|
"loss": 0.0, |
|
"step": 2100 |
|
}, |
|
{ |
|
"epoch": 2.19, |
|
"learning_rate": 7.847127996381729e-06, |
|
"loss": 0.0001, |
|
"step": 2110 |
|
}, |
|
{ |
|
"epoch": 2.2, |
|
"learning_rate": 7.620985979194935e-06, |
|
"loss": 0.0001, |
|
"step": 2120 |
|
}, |
|
{ |
|
"epoch": 2.2, |
|
"learning_rate": 7.394843962008141e-06, |
|
"loss": 0.0, |
|
"step": 2130 |
|
}, |
|
{ |
|
"epoch": 2.2, |
|
"learning_rate": 7.168701944821349e-06, |
|
"loss": 0.0, |
|
"step": 2140 |
|
}, |
|
{ |
|
"epoch": 2.21, |
|
"learning_rate": 6.942559927634555e-06, |
|
"loss": 0.0, |
|
"step": 2150 |
|
}, |
|
{ |
|
"epoch": 2.21, |
|
"learning_rate": 6.716417910447762e-06, |
|
"loss": 0.0004, |
|
"step": 2160 |
|
}, |
|
{ |
|
"epoch": 2.22, |
|
"learning_rate": 6.4902758932609675e-06, |
|
"loss": 0.0, |
|
"step": 2170 |
|
}, |
|
{ |
|
"epoch": 2.22, |
|
"learning_rate": 6.264133876074176e-06, |
|
"loss": 0.0002, |
|
"step": 2180 |
|
}, |
|
{ |
|
"epoch": 2.22, |
|
"learning_rate": 6.037991858887382e-06, |
|
"loss": 0.0, |
|
"step": 2190 |
|
}, |
|
{ |
|
"epoch": 2.23, |
|
"learning_rate": 5.811849841700588e-06, |
|
"loss": 0.0, |
|
"step": 2200 |
|
}, |
|
{ |
|
"epoch": 2.23, |
|
"eval_accuracy": 0.5330296127562643, |
|
"eval_loss": 2.6677799224853516, |
|
"eval_runtime": 1087.0337, |
|
"eval_samples_per_second": 0.404, |
|
"eval_steps_per_second": 0.101, |
|
"step": 2200 |
|
}, |
|
{ |
|
"epoch": 2.23, |
|
"learning_rate": 5.585707824513795e-06, |
|
"loss": 0.0, |
|
"step": 2210 |
|
}, |
|
{ |
|
"epoch": 2.24, |
|
"learning_rate": 5.359565807327002e-06, |
|
"loss": 0.0002, |
|
"step": 2220 |
|
}, |
|
{ |
|
"epoch": 2.24, |
|
"learning_rate": 5.1334237901402085e-06, |
|
"loss": 0.0, |
|
"step": 2230 |
|
}, |
|
{ |
|
"epoch": 2.24, |
|
"learning_rate": 4.907281772953415e-06, |
|
"loss": 0.0, |
|
"step": 2240 |
|
}, |
|
{ |
|
"epoch": 2.25, |
|
"learning_rate": 4.681139755766622e-06, |
|
"loss": 0.0, |
|
"step": 2250 |
|
}, |
|
{ |
|
"epoch": 2.25, |
|
"learning_rate": 4.4549977385798285e-06, |
|
"loss": 0.0, |
|
"step": 2260 |
|
}, |
|
{ |
|
"epoch": 2.26, |
|
"learning_rate": 4.228855721393035e-06, |
|
"loss": 0.0, |
|
"step": 2270 |
|
}, |
|
{ |
|
"epoch": 2.26, |
|
"learning_rate": 4.002713704206242e-06, |
|
"loss": 0.0, |
|
"step": 2280 |
|
}, |
|
{ |
|
"epoch": 2.26, |
|
"learning_rate": 3.7765716870194486e-06, |
|
"loss": 0.0, |
|
"step": 2290 |
|
}, |
|
{ |
|
"epoch": 2.27, |
|
"learning_rate": 3.550429669832655e-06, |
|
"loss": 0.0, |
|
"step": 2300 |
|
}, |
|
{ |
|
"epoch": 2.27, |
|
"learning_rate": 3.324287652645862e-06, |
|
"loss": 0.0, |
|
"step": 2310 |
|
}, |
|
{ |
|
"epoch": 2.28, |
|
"learning_rate": 3.0981456354590687e-06, |
|
"loss": 0.0, |
|
"step": 2320 |
|
}, |
|
{ |
|
"epoch": 2.28, |
|
"learning_rate": 2.8720036182722753e-06, |
|
"loss": 0.0, |
|
"step": 2330 |
|
}, |
|
{ |
|
"epoch": 2.28, |
|
"learning_rate": 2.645861601085482e-06, |
|
"loss": 0.0, |
|
"step": 2340 |
|
}, |
|
{ |
|
"epoch": 2.29, |
|
"learning_rate": 2.4197195838986887e-06, |
|
"loss": 0.0, |
|
"step": 2350 |
|
}, |
|
{ |
|
"epoch": 2.29, |
|
"learning_rate": 2.1935775667118954e-06, |
|
"loss": 0.0, |
|
"step": 2360 |
|
}, |
|
{ |
|
"epoch": 2.3, |
|
"learning_rate": 1.967435549525102e-06, |
|
"loss": 0.0, |
|
"step": 2370 |
|
}, |
|
{ |
|
"epoch": 2.3, |
|
"learning_rate": 1.7412935323383086e-06, |
|
"loss": 0.0, |
|
"step": 2380 |
|
}, |
|
{ |
|
"epoch": 2.31, |
|
"learning_rate": 1.5151515151515152e-06, |
|
"loss": 0.0, |
|
"step": 2390 |
|
}, |
|
{ |
|
"epoch": 2.31, |
|
"learning_rate": 1.289009497964722e-06, |
|
"loss": 0.0, |
|
"step": 2400 |
|
}, |
|
{ |
|
"epoch": 2.31, |
|
"eval_accuracy": 0.5079726651480638, |
|
"eval_loss": 2.713104724884033, |
|
"eval_runtime": 1085.6923, |
|
"eval_samples_per_second": 0.404, |
|
"eval_steps_per_second": 0.101, |
|
"step": 2400 |
|
}, |
|
{ |
|
"epoch": 2.31, |
|
"learning_rate": 1.0628674807779286e-06, |
|
"loss": 0.0, |
|
"step": 2410 |
|
}, |
|
{ |
|
"epoch": 2.32, |
|
"learning_rate": 8.367254635911353e-07, |
|
"loss": 0.0, |
|
"step": 2420 |
|
}, |
|
{ |
|
"epoch": 2.32, |
|
"learning_rate": 6.10583446404342e-07, |
|
"loss": 0.0, |
|
"step": 2430 |
|
}, |
|
{ |
|
"epoch": 2.33, |
|
"learning_rate": 3.844414292175486e-07, |
|
"loss": 0.0, |
|
"step": 2440 |
|
}, |
|
{ |
|
"epoch": 2.33, |
|
"learning_rate": 1.582994120307553e-07, |
|
"loss": 0.0, |
|
"step": 2450 |
|
}, |
|
{ |
|
"epoch": 2.33, |
|
"step": 2457, |
|
"total_flos": 1.2241447696947216e+19, |
|
"train_loss": 0.047377249488086234, |
|
"train_runtime": 41549.3568, |
|
"train_samples_per_second": 0.237, |
|
"train_steps_per_second": 0.059 |
|
} |
|
], |
|
"logging_steps": 10, |
|
"max_steps": 2457,
|
"num_train_epochs": 9223372036854775807, |
|
"save_steps": 200, |
|
"total_flos": 1.2241447696947216e+19, |
|
"trial_name": null, |
|
"trial_params": null |
|
} |
|
|