{
  "best_metric": 0.9781976744186046,
  "best_model_checkpoint": "mobilenet_v2_1.0_224-finetuned-plantdisease/checkpoint-2614",
  "epoch": 19.965576592082616,
  "eval_steps": 500,
  "global_step": 2900,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.06884681583476764,
      "grad_norm": 5.361344337463379,
      "learning_rate": 1.724137931034483e-06,
      "loss": 2.7576,
      "step": 10
    },
    {
      "epoch": 0.13769363166953527,
      "grad_norm": 5.03896427154541,
      "learning_rate": 3.448275862068966e-06,
      "loss": 2.7085,
      "step": 20
    },
    {
      "epoch": 0.20654044750430292,
      "grad_norm": 5.099004745483398,
      "learning_rate": 5.172413793103448e-06,
      "loss": 2.7048,
      "step": 30
    },
    {
      "epoch": 0.27538726333907054,
      "grad_norm": 5.294215202331543,
      "learning_rate": 6.896551724137932e-06,
      "loss": 2.6637,
      "step": 40
    },
    {
      "epoch": 0.3442340791738382,
      "grad_norm": 5.142210483551025,
      "learning_rate": 8.620689655172414e-06,
      "loss": 2.6127,
      "step": 50
    },
    {
      "epoch": 0.41308089500860584,
      "grad_norm": 5.095492839813232,
      "learning_rate": 1.0344827586206897e-05,
      "loss": 2.5458,
      "step": 60
    },
    {
      "epoch": 0.4819277108433735,
      "grad_norm": 4.894885063171387,
      "learning_rate": 1.206896551724138e-05,
      "loss": 2.4968,
      "step": 70
    },
    {
      "epoch": 0.5507745266781411,
      "grad_norm": 4.9816365242004395,
      "learning_rate": 1.3793103448275863e-05,
      "loss": 2.4015,
      "step": 80
    },
    {
      "epoch": 0.6196213425129088,
      "grad_norm": 4.671796798706055,
      "learning_rate": 1.5517241379310346e-05,
      "loss": 2.3308,
      "step": 90
    },
    {
      "epoch": 0.6884681583476764,
      "grad_norm": 4.635168552398682,
      "learning_rate": 1.7241379310344828e-05,
      "loss": 2.2212,
      "step": 100
    },
    {
      "epoch": 0.7573149741824441,
      "grad_norm": 4.575173854827881,
      "learning_rate": 1.896551724137931e-05,
      "loss": 2.1272,
      "step": 110
    },
    {
      "epoch": 0.8261617900172117,
      "grad_norm": 4.4043660163879395,
      "learning_rate": 2.0689655172413793e-05,
      "loss": 2.0085,
      "step": 120
    },
    {
      "epoch": 0.8950086058519794,
      "grad_norm": 4.405401229858398,
      "learning_rate": 2.2413793103448276e-05,
      "loss": 1.9442,
      "step": 130
    },
    {
      "epoch": 0.963855421686747,
      "grad_norm": 4.29350471496582,
      "learning_rate": 2.413793103448276e-05,
      "loss": 1.7982,
      "step": 140
    },
    {
      "epoch": 0.9982788296041308,
      "eval_accuracy": 0.40358527131782945,
      "eval_loss": 1.9824707508087158,
      "eval_runtime": 8.0421,
      "eval_samples_per_second": 256.65,
      "eval_steps_per_second": 8.082,
      "step": 145
    },
    {
      "epoch": 1.0327022375215147,
      "grad_norm": 4.268815040588379,
      "learning_rate": 2.5862068965517244e-05,
      "loss": 1.7064,
      "step": 150
    },
    {
      "epoch": 1.1015490533562822,
      "grad_norm": 4.16851806640625,
      "learning_rate": 2.7586206896551727e-05,
      "loss": 1.5684,
      "step": 160
    },
    {
      "epoch": 1.1703958691910499,
      "grad_norm": 4.322150230407715,
      "learning_rate": 2.9310344827586206e-05,
      "loss": 1.4844,
      "step": 170
    },
    {
      "epoch": 1.2392426850258176,
      "grad_norm": 3.958456039428711,
      "learning_rate": 3.103448275862069e-05,
      "loss": 1.3968,
      "step": 180
    },
    {
      "epoch": 1.3080895008605853,
      "grad_norm": 3.7061522006988525,
      "learning_rate": 3.275862068965517e-05,
      "loss": 1.2972,
      "step": 190
    },
    {
      "epoch": 1.3769363166953528,
      "grad_norm": 3.8323745727539062,
      "learning_rate": 3.4482758620689657e-05,
      "loss": 1.1544,
      "step": 200
    },
    {
      "epoch": 1.4457831325301205,
      "grad_norm": 3.52496600151062,
      "learning_rate": 3.620689655172414e-05,
      "loss": 1.0645,
      "step": 210
    },
    {
      "epoch": 1.5146299483648882,
      "grad_norm": 3.744525194168091,
      "learning_rate": 3.793103448275862e-05,
      "loss": 1.0238,
      "step": 220
    },
    {
      "epoch": 1.5834767641996557,
      "grad_norm": 3.494678497314453,
      "learning_rate": 3.965517241379311e-05,
      "loss": 0.9237,
      "step": 230
    },
    {
      "epoch": 1.6523235800344234,
      "grad_norm": 3.6277663707733154,
      "learning_rate": 4.1379310344827587e-05,
      "loss": 0.881,
      "step": 240
    },
    {
      "epoch": 1.721170395869191,
      "grad_norm": 2.9900975227355957,
      "learning_rate": 4.3103448275862066e-05,
      "loss": 0.7883,
      "step": 250
    },
    {
      "epoch": 1.7900172117039586,
      "grad_norm": 3.553596258163452,
      "learning_rate": 4.482758620689655e-05,
      "loss": 0.7713,
      "step": 260
    },
    {
      "epoch": 1.8588640275387265,
      "grad_norm": 3.206496477127075,
      "learning_rate": 4.655172413793104e-05,
      "loss": 0.7063,
      "step": 270
    },
    {
      "epoch": 1.927710843373494,
      "grad_norm": 3.446793556213379,
      "learning_rate": 4.827586206896552e-05,
      "loss": 0.6517,
      "step": 280
    },
    {
      "epoch": 1.9965576592082617,
      "grad_norm": 3.7154784202575684,
      "learning_rate": 5e-05,
      "loss": 0.6137,
      "step": 290
    },
    {
      "epoch": 1.9965576592082617,
      "eval_accuracy": 0.6414728682170543,
      "eval_loss": 1.1130067110061646,
      "eval_runtime": 7.6055,
      "eval_samples_per_second": 271.384,
      "eval_steps_per_second": 8.546,
      "step": 290
    },
    {
      "epoch": 2.0654044750430294,
      "grad_norm": 3.542126417160034,
      "learning_rate": 4.980842911877395e-05,
      "loss": 0.6058,
      "step": 300
    },
    {
      "epoch": 2.134251290877797,
      "grad_norm": 3.6798830032348633,
      "learning_rate": 4.96168582375479e-05,
      "loss": 0.5967,
      "step": 310
    },
    {
      "epoch": 2.2030981067125643,
      "grad_norm": 3.2142186164855957,
      "learning_rate": 4.9425287356321845e-05,
      "loss": 0.5376,
      "step": 320
    },
    {
      "epoch": 2.2719449225473323,
      "grad_norm": 3.1692137718200684,
      "learning_rate": 4.9233716475095786e-05,
      "loss": 0.5135,
      "step": 330
    },
    {
      "epoch": 2.3407917383820998,
      "grad_norm": 2.785217523574829,
      "learning_rate": 4.904214559386973e-05,
      "loss": 0.4794,
      "step": 340
    },
    {
      "epoch": 2.4096385542168672,
      "grad_norm": 3.252037763595581,
      "learning_rate": 4.885057471264368e-05,
      "loss": 0.5012,
      "step": 350
    },
    {
      "epoch": 2.478485370051635,
      "grad_norm": 2.9605207443237305,
      "learning_rate": 4.865900383141763e-05,
      "loss": 0.4613,
      "step": 360
    },
    {
      "epoch": 2.5473321858864026,
      "grad_norm": 3.263587474822998,
      "learning_rate": 4.846743295019157e-05,
      "loss": 0.4191,
      "step": 370
    },
    {
      "epoch": 2.6161790017211706,
      "grad_norm": 3.321648120880127,
      "learning_rate": 4.827586206896552e-05,
      "loss": 0.4206,
      "step": 380
    },
    {
      "epoch": 2.685025817555938,
      "grad_norm": 2.7976036071777344,
      "learning_rate": 4.8084291187739464e-05,
      "loss": 0.462,
      "step": 390
    },
    {
      "epoch": 2.7538726333907055,
      "grad_norm": 3.0656702518463135,
      "learning_rate": 4.789272030651341e-05,
      "loss": 0.41,
      "step": 400
    },
    {
      "epoch": 2.8227194492254735,
      "grad_norm": 3.332609176635742,
      "learning_rate": 4.770114942528736e-05,
      "loss": 0.4012,
      "step": 410
    },
    {
      "epoch": 2.891566265060241,
      "grad_norm": 3.5820395946502686,
      "learning_rate": 4.7509578544061307e-05,
      "loss": 0.3905,
      "step": 420
    },
    {
      "epoch": 2.960413080895009,
      "grad_norm": 3.3419365882873535,
      "learning_rate": 4.7318007662835254e-05,
      "loss": 0.4176,
      "step": 430
    },
    {
      "epoch": 2.9948364888123926,
      "eval_accuracy": 0.8468992248062015,
      "eval_loss": 0.48874419927597046,
      "eval_runtime": 7.9016,
      "eval_samples_per_second": 261.213,
      "eval_steps_per_second": 8.226,
      "step": 435
    },
    {
      "epoch": 3.0292598967297764,
      "grad_norm": 3.0542681217193604,
      "learning_rate": 4.7126436781609195e-05,
      "loss": 0.4024,
      "step": 440
    },
    {
      "epoch": 3.098106712564544,
      "grad_norm": 2.760667562484741,
      "learning_rate": 4.693486590038315e-05,
      "loss": 0.3706,
      "step": 450
    },
    {
      "epoch": 3.1669535283993113,
      "grad_norm": 3.2174575328826904,
      "learning_rate": 4.674329501915709e-05,
      "loss": 0.3509,
      "step": 460
    },
    {
      "epoch": 3.2358003442340793,
      "grad_norm": 3.6685619354248047,
      "learning_rate": 4.655172413793104e-05,
      "loss": 0.3613,
      "step": 470
    },
    {
      "epoch": 3.3046471600688467,
      "grad_norm": 3.4590651988983154,
      "learning_rate": 4.6360153256704985e-05,
      "loss": 0.346,
      "step": 480
    },
    {
      "epoch": 3.3734939759036147,
      "grad_norm": 3.0743446350097656,
      "learning_rate": 4.616858237547893e-05,
      "loss": 0.3356,
      "step": 490
    },
    {
      "epoch": 3.442340791738382,
      "grad_norm": 3.5646955966949463,
      "learning_rate": 4.597701149425287e-05,
      "loss": 0.3404,
      "step": 500
    },
    {
      "epoch": 3.5111876075731496,
      "grad_norm": 3.2671332359313965,
      "learning_rate": 4.578544061302682e-05,
      "loss": 0.3419,
      "step": 510
    },
    {
      "epoch": 3.580034423407917,
      "grad_norm": 2.7561419010162354,
      "learning_rate": 4.559386973180077e-05,
      "loss": 0.3306,
      "step": 520
    },
    {
      "epoch": 3.648881239242685,
      "grad_norm": 3.1499881744384766,
      "learning_rate": 4.5402298850574716e-05,
      "loss": 0.3575,
      "step": 530
    },
    {
      "epoch": 3.7177280550774525,
      "grad_norm": 2.576195240020752,
      "learning_rate": 4.5210727969348656e-05,
      "loss": 0.2954,
      "step": 540
    },
    {
      "epoch": 3.7865748709122204,
      "grad_norm": 2.685713529586792,
      "learning_rate": 4.501915708812261e-05,
      "loss": 0.2971,
      "step": 550
    },
    {
      "epoch": 3.855421686746988,
      "grad_norm": 3.1608219146728516,
      "learning_rate": 4.482758620689655e-05,
      "loss": 0.3057,
      "step": 560
    },
    {
      "epoch": 3.9242685025817554,
      "grad_norm": 3.3374826908111572,
      "learning_rate": 4.46360153256705e-05,
      "loss": 0.2815,
      "step": 570
    },
    {
      "epoch": 3.9931153184165233,
      "grad_norm": 3.0209603309631348,
      "learning_rate": 4.4444444444444447e-05,
      "loss": 0.3107,
      "step": 580
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.8943798449612403,
      "eval_loss": 0.34142178297042847,
      "eval_runtime": 7.9804,
      "eval_samples_per_second": 258.634,
      "eval_steps_per_second": 8.145,
      "step": 581
    },
    {
      "epoch": 4.061962134251291,
      "grad_norm": 2.7664377689361572,
      "learning_rate": 4.4252873563218394e-05,
      "loss": 0.2689,
      "step": 590
    },
    {
      "epoch": 4.130808950086059,
      "grad_norm": 2.80204439163208,
      "learning_rate": 4.406130268199234e-05,
      "loss": 0.3003,
      "step": 600
    },
    {
      "epoch": 4.199655765920826,
      "grad_norm": 3.0250327587127686,
      "learning_rate": 4.386973180076628e-05,
      "loss": 0.2768,
      "step": 610
    },
    {
      "epoch": 4.268502581755594,
      "grad_norm": 3.8367512226104736,
      "learning_rate": 4.367816091954024e-05,
      "loss": 0.2582,
      "step": 620
    },
    {
      "epoch": 4.337349397590361,
      "grad_norm": 3.3538951873779297,
      "learning_rate": 4.348659003831418e-05,
      "loss": 0.2849,
      "step": 630
    },
    {
      "epoch": 4.406196213425129,
      "grad_norm": 3.2220115661621094,
      "learning_rate": 4.3295019157088125e-05,
      "loss": 0.2744,
      "step": 640
    },
    {
      "epoch": 4.475043029259897,
      "grad_norm": 3.196542263031006,
      "learning_rate": 4.3103448275862066e-05,
      "loss": 0.2744,
      "step": 650
    },
    {
      "epoch": 4.5438898450946645,
      "grad_norm": 3.07588791847229,
      "learning_rate": 4.291187739463602e-05,
      "loss": 0.2766,
      "step": 660
    },
    {
      "epoch": 4.612736660929432,
      "grad_norm": 3.722421169281006,
      "learning_rate": 4.272030651340996e-05,
      "loss": 0.2699,
      "step": 670
    },
    {
      "epoch": 4.6815834767641995,
      "grad_norm": 2.5164051055908203,
      "learning_rate": 4.252873563218391e-05,
      "loss": 0.2401,
      "step": 680
    },
    {
      "epoch": 4.750430292598967,
      "grad_norm": 3.1823108196258545,
      "learning_rate": 4.2337164750957856e-05,
      "loss": 0.2293,
      "step": 690
    },
    {
      "epoch": 4.8192771084337345,
      "grad_norm": 3.4586544036865234,
      "learning_rate": 4.21455938697318e-05,
      "loss": 0.27,
      "step": 700
    },
    {
      "epoch": 4.888123924268503,
      "grad_norm": 3.251978635787964,
      "learning_rate": 4.195402298850575e-05,
      "loss": 0.2551,
      "step": 710
    },
    {
      "epoch": 4.95697074010327,
      "grad_norm": 2.606138229370117,
      "learning_rate": 4.17624521072797e-05,
      "loss": 0.2255,
      "step": 720
    },
    {
      "epoch": 4.998278829604131,
      "eval_accuracy": 0.9123062015503876,
      "eval_loss": 0.27319762110710144,
      "eval_runtime": 7.8624,
      "eval_samples_per_second": 262.515,
      "eval_steps_per_second": 8.267,
      "step": 726
    },
    {
      "epoch": 5.025817555938038,
      "grad_norm": 2.684196949005127,
      "learning_rate": 4.1570881226053646e-05,
      "loss": 0.2487,
      "step": 730
    },
    {
      "epoch": 5.094664371772805,
      "grad_norm": 2.931312084197998,
      "learning_rate": 4.1379310344827587e-05,
      "loss": 0.2314,
      "step": 740
    },
    {
      "epoch": 5.163511187607573,
      "grad_norm": 3.0064966678619385,
      "learning_rate": 4.1187739463601534e-05,
      "loss": 0.2293,
      "step": 750
    },
    {
      "epoch": 5.232358003442341,
      "grad_norm": 3.3251922130584717,
      "learning_rate": 4.099616858237548e-05,
      "loss": 0.2388,
      "step": 760
    },
    {
      "epoch": 5.301204819277109,
      "grad_norm": 1.7967256307601929,
      "learning_rate": 4.080459770114943e-05,
      "loss": 0.2169,
      "step": 770
    },
    {
      "epoch": 5.370051635111876,
      "grad_norm": 3.74770450592041,
      "learning_rate": 4.061302681992337e-05,
      "loss": 0.2096,
      "step": 780
    },
    {
      "epoch": 5.438898450946644,
      "grad_norm": 3.072739601135254,
      "learning_rate": 4.0421455938697324e-05,
      "loss": 0.2047,
      "step": 790
    },
    {
      "epoch": 5.507745266781411,
      "grad_norm": 2.96258807182312,
      "learning_rate": 4.0229885057471265e-05,
      "loss": 0.2313,
      "step": 800
    },
    {
      "epoch": 5.576592082616179,
      "grad_norm": 2.5839080810546875,
      "learning_rate": 4.003831417624521e-05,
      "loss": 0.2112,
      "step": 810
    },
    {
      "epoch": 5.645438898450947,
      "grad_norm": 3.1343889236450195,
      "learning_rate": 3.984674329501916e-05,
      "loss": 0.2482,
      "step": 820
    },
    {
      "epoch": 5.714285714285714,
      "grad_norm": 2.240661382675171,
      "learning_rate": 3.965517241379311e-05,
      "loss": 0.2146,
      "step": 830
    },
    {
      "epoch": 5.783132530120482,
      "grad_norm": 3.1076242923736572,
      "learning_rate": 3.9463601532567055e-05,
      "loss": 0.2234,
      "step": 840
    },
    {
      "epoch": 5.851979345955249,
      "grad_norm": 2.9466423988342285,
      "learning_rate": 3.9272030651340996e-05,
      "loss": 0.2301,
      "step": 850
    },
    {
      "epoch": 5.920826161790017,
      "grad_norm": 3.1047613620758057,
      "learning_rate": 3.908045977011495e-05,
      "loss": 0.2202,
      "step": 860
    },
    {
      "epoch": 5.989672977624785,
      "grad_norm": 2.182670831680298,
      "learning_rate": 3.888888888888889e-05,
      "loss": 0.1833,
      "step": 870
    },
    {
      "epoch": 5.9965576592082614,
      "eval_accuracy": 0.7582364341085271,
      "eval_loss": 0.7461967468261719,
      "eval_runtime": 7.912,
      "eval_samples_per_second": 260.87,
      "eval_steps_per_second": 8.215,
      "step": 871
    },
    {
      "epoch": 6.058519793459553,
      "grad_norm": 2.8595638275146484,
      "learning_rate": 3.869731800766284e-05,
      "loss": 0.198,
      "step": 880
    },
    {
      "epoch": 6.12736660929432,
      "grad_norm": 1.812150478363037,
      "learning_rate": 3.850574712643678e-05,
      "loss": 0.17,
      "step": 890
    },
    {
      "epoch": 6.196213425129088,
      "grad_norm": 2.8384931087493896,
      "learning_rate": 3.831417624521073e-05,
      "loss": 0.2387,
      "step": 900
    },
    {
      "epoch": 6.265060240963855,
      "grad_norm": 4.172893047332764,
      "learning_rate": 3.8122605363984674e-05,
      "loss": 0.2202,
      "step": 910
    },
    {
      "epoch": 6.333907056798623,
      "grad_norm": 4.461463928222656,
      "learning_rate": 3.793103448275862e-05,
      "loss": 0.2501,
      "step": 920
    },
    {
      "epoch": 6.402753872633391,
      "grad_norm": 3.430365562438965,
      "learning_rate": 3.773946360153257e-05,
      "loss": 0.185,
      "step": 930
    },
    {
      "epoch": 6.4716006884681585,
      "grad_norm": 3.3742434978485107,
      "learning_rate": 3.7547892720306517e-05,
      "loss": 0.2166,
      "step": 940
    },
    {
      "epoch": 6.540447504302926,
      "grad_norm": 3.7096402645111084,
      "learning_rate": 3.735632183908046e-05,
      "loss": 0.234,
      "step": 950
    },
    {
      "epoch": 6.6092943201376935,
      "grad_norm": 2.182164192199707,
      "learning_rate": 3.716475095785441e-05,
      "loss": 0.1852,
      "step": 960
    },
    {
      "epoch": 6.678141135972461,
      "grad_norm": 2.585125684738159,
      "learning_rate": 3.697318007662835e-05,
      "loss": 0.1947,
      "step": 970
    },
    {
      "epoch": 6.746987951807229,
      "grad_norm": 2.5588345527648926,
      "learning_rate": 3.67816091954023e-05,
      "loss": 0.211,
      "step": 980
    },
    {
      "epoch": 6.815834767641997,
      "grad_norm": 3.0795257091522217,
      "learning_rate": 3.659003831417625e-05,
      "loss": 0.1967,
      "step": 990
    },
    {
      "epoch": 6.884681583476764,
      "grad_norm": 3.7196614742279053,
      "learning_rate": 3.6398467432950195e-05,
      "loss": 0.1836,
      "step": 1000
    },
    {
      "epoch": 6.953528399311532,
      "grad_norm": 2.3857243061065674,
      "learning_rate": 3.620689655172414e-05,
      "loss": 0.2062,
      "step": 1010
    },
    {
      "epoch": 6.994836488812393,
      "eval_accuracy": 0.8803294573643411,
      "eval_loss": 0.3770907521247864,
      "eval_runtime": 7.9957,
      "eval_samples_per_second": 258.14,
      "eval_steps_per_second": 8.129,
      "step": 1016
    },
    {
      "epoch": 7.022375215146299,
      "grad_norm": 2.5683236122131348,
      "learning_rate": 3.601532567049808e-05,
      "loss": 0.1754,
      "step": 1020
    },
    {
      "epoch": 7.091222030981067,
      "grad_norm": 2.679636001586914,
      "learning_rate": 3.582375478927204e-05,
      "loss": 0.1675,
      "step": 1030
    },
    {
      "epoch": 7.160068846815835,
      "grad_norm": 2.5351173877716064,
      "learning_rate": 3.563218390804598e-05,
      "loss": 0.1565,
      "step": 1040
    },
    {
      "epoch": 7.228915662650603,
      "grad_norm": 2.9072484970092773,
      "learning_rate": 3.5440613026819926e-05,
      "loss": 0.1813,
      "step": 1050
    },
    {
      "epoch": 7.29776247848537,
      "grad_norm": 2.5954692363739014,
      "learning_rate": 3.5249042145593867e-05,
      "loss": 0.2118,
      "step": 1060
    },
    {
      "epoch": 7.366609294320138,
      "grad_norm": 2.7799787521362305,
      "learning_rate": 3.505747126436782e-05,
      "loss": 0.1744,
      "step": 1070
    },
    {
      "epoch": 7.435456110154905,
      "grad_norm": 3.0674917697906494,
      "learning_rate": 3.486590038314176e-05,
      "loss": 0.1922,
      "step": 1080
    },
    {
      "epoch": 7.504302925989673,
      "grad_norm": 3.5969247817993164,
      "learning_rate": 3.467432950191571e-05,
      "loss": 0.1794,
      "step": 1090
    },
    {
      "epoch": 7.573149741824441,
      "grad_norm": 2.6146047115325928,
      "learning_rate": 3.4482758620689657e-05,
      "loss": 0.1682,
      "step": 1100
    },
    {
      "epoch": 7.641996557659208,
      "grad_norm": 2.785433053970337,
      "learning_rate": 3.4291187739463604e-05,
      "loss": 0.1901,
      "step": 1110
    },
    {
      "epoch": 7.710843373493976,
      "grad_norm": 2.3648314476013184,
      "learning_rate": 3.409961685823755e-05,
      "loss": 0.1521,
      "step": 1120
    },
    {
      "epoch": 7.779690189328743,
      "grad_norm": 2.3298792839050293,
      "learning_rate": 3.390804597701149e-05,
      "loss": 0.1901,
      "step": 1130
    },
    {
      "epoch": 7.848537005163511,
      "grad_norm": 2.2863216400146484,
      "learning_rate": 3.371647509578545e-05,
      "loss": 0.1706,
      "step": 1140
    },
    {
      "epoch": 7.917383820998279,
      "grad_norm": 3.0322484970092773,
      "learning_rate": 3.352490421455939e-05,
      "loss": 0.1612,
      "step": 1150
    },
    {
      "epoch": 7.986230636833047,
      "grad_norm": 2.785752534866333,
      "learning_rate": 3.3333333333333335e-05,
      "loss": 0.1657,
      "step": 1160
    },
    {
      "epoch": 8.0,
      "eval_accuracy": 0.8541666666666666,
      "eval_loss": 0.47179850935935974,
      "eval_runtime": 7.8842,
      "eval_samples_per_second": 261.788,
      "eval_steps_per_second": 8.244,
      "step": 1162
    },
    {
      "epoch": 8.055077452667813,
      "grad_norm": 3.0953948497772217,
      "learning_rate": 3.314176245210728e-05,
      "loss": 0.1848,
      "step": 1170
    },
    {
      "epoch": 8.123924268502583,
      "grad_norm": 2.7531402111053467,
      "learning_rate": 3.295019157088123e-05,
      "loss": 0.1451,
      "step": 1180
    },
    {
      "epoch": 8.19277108433735,
      "grad_norm": 3.849405288696289,
      "learning_rate": 3.275862068965517e-05,
      "loss": 0.1682,
      "step": 1190
    },
    {
      "epoch": 8.261617900172118,
      "grad_norm": 2.8468093872070312,
      "learning_rate": 3.256704980842912e-05,
      "loss": 0.1868,
      "step": 1200
    },
    {
      "epoch": 8.330464716006885,
      "grad_norm": 3.0676023960113525,
      "learning_rate": 3.2375478927203066e-05,
      "loss": 0.1745,
      "step": 1210
    },
    {
      "epoch": 8.399311531841652,
      "grad_norm": 2.8244659900665283,
      "learning_rate": 3.218390804597701e-05,
      "loss": 0.1783,
      "step": 1220
    },
    {
      "epoch": 8.46815834767642,
      "grad_norm": 3.3981621265411377,
      "learning_rate": 3.1992337164750954e-05,
      "loss": 0.1509,
      "step": 1230
    },
    {
      "epoch": 8.537005163511187,
      "grad_norm": 3.2597496509552,
      "learning_rate": 3.180076628352491e-05,
      "loss": 0.1463,
      "step": 1240
    },
    {
      "epoch": 8.605851979345955,
      "grad_norm": 2.98665452003479,
      "learning_rate": 3.160919540229885e-05,
      "loss": 0.1854,
      "step": 1250
    },
    {
      "epoch": 8.674698795180722,
      "grad_norm": 3.56426739692688,
      "learning_rate": 3.1417624521072797e-05,
      "loss": 0.1801,
      "step": 1260
    },
    {
      "epoch": 8.74354561101549,
      "grad_norm": 3.350994110107422,
      "learning_rate": 3.1226053639846744e-05,
      "loss": 0.146,
      "step": 1270
    },
    {
      "epoch": 8.812392426850257,
      "grad_norm": 2.637911081314087,
      "learning_rate": 3.103448275862069e-05,
      "loss": 0.1759,
      "step": 1280
    },
    {
      "epoch": 8.881239242685027,
      "grad_norm": 3.2409827709198,
      "learning_rate": 3.084291187739464e-05,
      "loss": 0.1897,
      "step": 1290
    },
    {
      "epoch": 8.950086058519794,
      "grad_norm": 2.5662424564361572,
      "learning_rate": 3.065134099616858e-05,
      "loss": 0.1427,
      "step": 1300
    },
    {
      "epoch": 8.99827882960413,
      "eval_accuracy": 0.8473837209302325,
      "eval_loss": 0.4902307689189911,
      "eval_runtime": 7.7511,
      "eval_samples_per_second": 266.284,
      "eval_steps_per_second": 8.386,
      "step": 1307
    },
    {
      "epoch": 9.018932874354562,
      "grad_norm": 2.6389100551605225,
      "learning_rate": 3.045977011494253e-05,
      "loss": 0.1577,
      "step": 1310
    },
    {
      "epoch": 9.087779690189329,
      "grad_norm": 2.561455249786377,
      "learning_rate": 3.0268199233716475e-05,
      "loss": 0.1686,
      "step": 1320
    },
    {
      "epoch": 9.156626506024097,
      "grad_norm": 2.796520233154297,
      "learning_rate": 3.0076628352490422e-05,
      "loss": 0.1623,
      "step": 1330
    },
    {
      "epoch": 9.225473321858864,
      "grad_norm": 3.3344945907592773,
      "learning_rate": 2.988505747126437e-05,
      "loss": 0.1684,
      "step": 1340
    },
    {
      "epoch": 9.294320137693632,
      "grad_norm": 2.494370698928833,
      "learning_rate": 2.9693486590038317e-05,
      "loss": 0.1679,
      "step": 1350
    },
    {
      "epoch": 9.363166953528399,
      "grad_norm": 2.9866812229156494,
      "learning_rate": 2.950191570881226e-05,
      "loss": 0.168,
      "step": 1360
    },
    {
      "epoch": 9.432013769363166,
      "grad_norm": 2.8670761585235596,
      "learning_rate": 2.9310344827586206e-05,
      "loss": 0.1587,
      "step": 1370
    },
    {
      "epoch": 9.500860585197934,
      "grad_norm": 2.6785264015197754,
      "learning_rate": 2.9118773946360157e-05,
      "loss": 0.1487,
      "step": 1380
    },
    {
      "epoch": 9.569707401032701,
      "grad_norm": 3.9908454418182373,
      "learning_rate": 2.89272030651341e-05,
      "loss": 0.1913,
      "step": 1390
    },
    {
      "epoch": 9.638554216867469,
      "grad_norm": 3.4217581748962402,
      "learning_rate": 2.8735632183908045e-05,
      "loss": 0.1568,
      "step": 1400
    },
    {
      "epoch": 9.707401032702238,
      "grad_norm": 3.4280409812927246,
      "learning_rate": 2.8544061302681996e-05,
      "loss": 0.1335,
      "step": 1410
    },
    {
      "epoch": 9.776247848537006,
      "grad_norm": 3.57127046585083,
      "learning_rate": 2.835249042145594e-05,
      "loss": 0.1508,
      "step": 1420
    },
    {
      "epoch": 9.845094664371773,
      "grad_norm": 2.238633394241333,
      "learning_rate": 2.8160919540229884e-05,
      "loss": 0.1878,
      "step": 1430
    },
    {
      "epoch": 9.91394148020654,
      "grad_norm": 2.7804625034332275,
      "learning_rate": 2.796934865900383e-05,
      "loss": 0.1497,
      "step": 1440
    },
    {
      "epoch": 9.982788296041308,
      "grad_norm": 3.1482419967651367,
      "learning_rate": 2.777777777777778e-05,
      "loss": 0.1598,
      "step": 1450
    },
    {
      "epoch": 9.996557659208262,
      "eval_accuracy": 0.9273255813953488,
      "eval_loss": 0.22291144728660583,
      "eval_runtime": 7.6884,
      "eval_samples_per_second": 268.456,
      "eval_steps_per_second": 8.454,
      "step": 1452
    },
    {
      "epoch": 10.051635111876076,
      "grad_norm": 2.5692484378814697,
      "learning_rate": 2.7586206896551727e-05,
      "loss": 0.1397,
      "step": 1460
    },
    {
      "epoch": 10.120481927710843,
      "grad_norm": 1.686840295791626,
      "learning_rate": 2.739463601532567e-05,
      "loss": 0.1246,
      "step": 1470
    },
    {
      "epoch": 10.18932874354561,
      "grad_norm": 2.7757647037506104,
      "learning_rate": 2.720306513409962e-05,
      "loss": 0.14,
      "step": 1480
    },
    {
      "epoch": 10.258175559380378,
      "grad_norm": 2.688798189163208,
      "learning_rate": 2.7011494252873566e-05,
      "loss": 0.1386,
      "step": 1490
    },
    {
      "epoch": 10.327022375215146,
      "grad_norm": 3.0354042053222656,
      "learning_rate": 2.681992337164751e-05,
      "loss": 0.1585,
      "step": 1500
    },
    {
      "epoch": 10.395869191049915,
      "grad_norm": 3.36676287651062,
      "learning_rate": 2.662835249042146e-05,
      "loss": 0.1368,
      "step": 1510
    },
    {
      "epoch": 10.464716006884682,
      "grad_norm": 2.3574771881103516,
      "learning_rate": 2.6436781609195405e-05,
      "loss": 0.1559,
      "step": 1520
    },
    {
      "epoch": 10.53356282271945,
      "grad_norm": 2.6529464721679688,
      "learning_rate": 2.624521072796935e-05,
      "loss": 0.154,
      "step": 1530
    },
    {
      "epoch": 10.602409638554217,
      "grad_norm": 3.449251174926758,
      "learning_rate": 2.6053639846743293e-05,
      "loss": 0.1401,
      "step": 1540
    },
    {
      "epoch": 10.671256454388985,
      "grad_norm": 2.449934482574463,
      "learning_rate": 2.5862068965517244e-05,
      "loss": 0.1408,
      "step": 1550
    },
    {
      "epoch": 10.740103270223752,
      "grad_norm": 2.8734378814697266,
      "learning_rate": 2.5670498084291188e-05,
      "loss": 0.1445,
      "step": 1560
    },
    {
      "epoch": 10.80895008605852,
      "grad_norm": 2.4971940517425537,
      "learning_rate": 2.5478927203065132e-05,
      "loss": 0.1643,
      "step": 1570
    },
    {
      "epoch": 10.877796901893287,
      "grad_norm": 3.7385921478271484,
      "learning_rate": 2.5287356321839083e-05,
      "loss": 0.1521,
      "step": 1580
    },
    {
      "epoch": 10.946643717728055,
      "grad_norm": 2.7152345180511475,
      "learning_rate": 2.5095785440613027e-05,
      "loss": 0.1504,
      "step": 1590
    },
    {
      "epoch": 10.994836488812393,
      "eval_accuracy": 0.8972868217054264,
      "eval_loss": 0.3021294176578522,
      "eval_runtime": 7.4857,
      "eval_samples_per_second": 275.726,
      "eval_steps_per_second": 8.683,
      "step": 1597
    },
    {
      "epoch": 11.015490533562822,
      "grad_norm": 3.032806158065796,
      "learning_rate": 2.4904214559386975e-05,
      "loss": 0.1366,
      "step": 1600
    },
    {
      "epoch": 11.08433734939759,
      "grad_norm": 2.85953950881958,
      "learning_rate": 2.4712643678160922e-05,
      "loss": 0.1433,
      "step": 1610
    },
    {
      "epoch": 11.153184165232357,
      "grad_norm": 2.1525700092315674,
      "learning_rate": 2.4521072796934867e-05,
      "loss": 0.1455,
      "step": 1620
    },
    {
      "epoch": 11.222030981067126,
      "grad_norm": 2.248845100402832,
      "learning_rate": 2.4329501915708814e-05,
      "loss": 0.1269,
      "step": 1630
    },
    {
      "epoch": 11.290877796901894,
      "grad_norm": 2.333122968673706,
      "learning_rate": 2.413793103448276e-05,
      "loss": 0.156,
      "step": 1640
    },
    {
      "epoch": 11.359724612736661,
      "grad_norm": 2.1817004680633545,
      "learning_rate": 2.3946360153256706e-05,
      "loss": 0.1423,
      "step": 1650
    },
    {
      "epoch": 11.428571428571429,
      "grad_norm": 2.1537561416625977,
      "learning_rate": 2.3754789272030653e-05,
      "loss": 0.1327,
      "step": 1660
    },
    {
      "epoch": 11.497418244406196,
      "grad_norm": 1.6727231740951538,
      "learning_rate": 2.3563218390804597e-05,
      "loss": 0.1272,
      "step": 1670
    },
    {
      "epoch": 11.566265060240964,
      "grad_norm": 3.15240216255188,
      "learning_rate": 2.3371647509578545e-05,
      "loss": 0.1474,
      "step": 1680
    },
    {
      "epoch": 11.635111876075731,
      "grad_norm": 2.031428813934326,
      "learning_rate": 2.3180076628352492e-05,
      "loss": 0.1341,
      "step": 1690
    },
    {
      "epoch": 11.703958691910499,
      "grad_norm": 2.5997140407562256,
      "learning_rate": 2.2988505747126437e-05,
      "loss": 0.153,
      "step": 1700
    },
    {
      "epoch": 11.772805507745266,
      "grad_norm": 2.405869483947754,
      "learning_rate": 2.2796934865900384e-05,
      "loss": 0.1434,
      "step": 1710
    },
    {
      "epoch": 11.841652323580034,
      "grad_norm": 2.8463237285614014,
      "learning_rate": 2.2605363984674328e-05,
      "loss": 0.1545,
      "step": 1720
    },
    {
      "epoch": 11.910499139414803,
      "grad_norm": 2.8656628131866455,
      "learning_rate": 2.2413793103448276e-05,
      "loss": 0.1412,
      "step": 1730
    },
    {
      "epoch": 11.97934595524957,
      "grad_norm": 2.9610302448272705,
      "learning_rate": 2.2222222222222223e-05,
      "loss": 0.1456,
      "step": 1740
    },
    {
      "epoch": 12.0,
      "eval_accuracy": 0.9224806201550387,
      "eval_loss": 0.24217405915260315,
      "eval_runtime": 7.2188,
      "eval_samples_per_second": 285.921,
      "eval_steps_per_second": 9.004,
      "step": 1743
    },
    {
      "epoch": 12.048192771084338,
      "grad_norm": 2.278578042984009,
      "learning_rate": 2.203065134099617e-05,
      "loss": 0.1079,
      "step": 1750
    },
    {
      "epoch": 12.117039586919105,
      "grad_norm": 2.1894547939300537,
      "learning_rate": 2.183908045977012e-05,
      "loss": 0.1298,
      "step": 1760
    },
    {
      "epoch": 12.185886402753873,
      "grad_norm": 2.7255043983459473,
      "learning_rate": 2.1647509578544062e-05,
      "loss": 0.1502,
      "step": 1770
    },
    {
      "epoch": 12.25473321858864,
      "grad_norm": 1.9726896286010742,
      "learning_rate": 2.145593869731801e-05,
      "loss": 0.1589,
      "step": 1780
    },
    {
      "epoch": 12.323580034423408,
      "grad_norm": 2.095195770263672,
      "learning_rate": 2.1264367816091954e-05,
      "loss": 0.1345,
      "step": 1790
    },
    {
      "epoch": 12.392426850258175,
      "grad_norm": 2.9772017002105713,
      "learning_rate": 2.10727969348659e-05,
      "loss": 0.1537,
      "step": 1800
    },
    {
      "epoch": 12.461273666092943,
      "grad_norm": 1.4328383207321167,
      "learning_rate": 2.088122605363985e-05,
      "loss": 0.1274,
      "step": 1810
    },
    {
      "epoch": 12.53012048192771,
      "grad_norm": 2.386387586593628,
      "learning_rate": 2.0689655172413793e-05,
      "loss": 0.1333,
      "step": 1820
    },
    {
      "epoch": 12.598967297762478,
      "grad_norm": 1.9341486692428589,
      "learning_rate": 2.049808429118774e-05,
      "loss": 0.1397,
      "step": 1830
    },
    {
      "epoch": 12.667814113597245,
      "grad_norm": 2.633357048034668,
      "learning_rate": 2.0306513409961685e-05,
      "loss": 0.1151,
      "step": 1840
    },
    {
      "epoch": 12.736660929432015,
      "grad_norm": 3.059253215789795,
      "learning_rate": 2.0114942528735632e-05,
      "loss": 0.119,
      "step": 1850
    },
    {
      "epoch": 12.805507745266782,
      "grad_norm": 1.629744291305542,
      "learning_rate": 1.992337164750958e-05,
      "loss": 0.1194,
      "step": 1860
    },
    {
      "epoch": 12.87435456110155,
      "grad_norm": 3.2468857765197754,
      "learning_rate": 1.9731800766283527e-05,
      "loss": 0.1567,
      "step": 1870
    },
    {
      "epoch": 12.943201376936317,
      "grad_norm": 2.7284107208251953,
      "learning_rate": 1.9540229885057475e-05,
      "loss": 0.119,
      "step": 1880
    },
    {
      "epoch": 12.99827882960413,
      "eval_accuracy": 0.9021317829457365,
      "eval_loss": 0.2836342751979828,
      "eval_runtime": 7.2838,
      "eval_samples_per_second": 283.368,
      "eval_steps_per_second": 8.924,
      "step": 1888
    },
    {
      "epoch": 13.012048192771084,
      "grad_norm": 3.377997875213623,
      "learning_rate": 1.934865900383142e-05,
      "loss": 0.119,
      "step": 1890
    },
    {
      "epoch": 13.080895008605852,
      "grad_norm": 2.6370134353637695,
      "learning_rate": 1.9157088122605367e-05,
      "loss": 0.1394,
      "step": 1900
    },
    {
      "epoch": 13.14974182444062,
      "grad_norm": 2.5573413372039795,
      "learning_rate": 1.896551724137931e-05,
      "loss": 0.1475,
      "step": 1910
    },
    {
      "epoch": 13.218588640275387,
      "grad_norm": 2.2910749912261963,
      "learning_rate": 1.8773946360153258e-05,
      "loss": 0.1332,
      "step": 1920
    },
    {
      "epoch": 13.287435456110154,
      "grad_norm": 2.3592233657836914,
      "learning_rate": 1.8582375478927206e-05,
      "loss": 0.1373,
      "step": 1930
    },
    {
      "epoch": 13.356282271944922,
      "grad_norm": 2.7366604804992676,
      "learning_rate": 1.839080459770115e-05,
      "loss": 0.129,
      "step": 1940
    },
    {
      "epoch": 13.42512908777969,
      "grad_norm": 2.452705144882202,
      "learning_rate": 1.8199233716475097e-05,
      "loss": 0.1183,
      "step": 1950
    },
    {
      "epoch": 13.493975903614459,
      "grad_norm": 3.3416621685028076,
      "learning_rate": 1.800766283524904e-05,
      "loss": 0.154,
      "step": 1960
    },
    {
      "epoch": 13.562822719449226,
      "grad_norm": 3.04327654838562,
      "learning_rate": 1.781609195402299e-05,
      "loss": 0.1264,
      "step": 1970
    },
    {
      "epoch": 13.631669535283994,
      "grad_norm": 2.6300888061523438,
      "learning_rate": 1.7624521072796933e-05,
      "loss": 0.1646,
      "step": 1980
    },
    {
      "epoch": 13.700516351118761,
      "grad_norm": 3.809067487716675,
      "learning_rate": 1.743295019157088e-05,
      "loss": 0.1363,
      "step": 1990
    },
    {
      "epoch": 13.769363166953529,
      "grad_norm": 3.2865700721740723,
      "learning_rate": 1.7241379310344828e-05,
      "loss": 0.1145,
      "step": 2000
    },
    {
      "epoch": 13.838209982788296,
      "grad_norm": 2.2611865997314453,
      "learning_rate": 1.7049808429118776e-05,
      "loss": 0.116,
      "step": 2010
    },
    {
      "epoch": 13.907056798623064,
      "grad_norm": 2.3190841674804688,
      "learning_rate": 1.6858237547892723e-05,
      "loss": 0.1105,
      "step": 2020
    },
    {
      "epoch": 13.975903614457831,
      "grad_norm": 3.105708360671997,
      "learning_rate": 1.6666666666666667e-05,
      "loss": 0.114,
      "step": 2030
    },
    {
      "epoch": 13.996557659208262,
      "eval_accuracy": 0.9292635658914729,
      "eval_loss": 0.20378853380680084,
      "eval_runtime": 7.3214,
      "eval_samples_per_second": 281.914,
      "eval_steps_per_second": 8.878,
      "step": 2033
    },
    {
      "epoch": 14.044750430292599,
      "grad_norm": 2.9859328269958496,
      "learning_rate": 1.6475095785440615e-05,
      "loss": 0.1319,
      "step": 2040
    },
    {
      "epoch": 14.113597246127366,
      "grad_norm": 4.717568874359131,
      "learning_rate": 1.628352490421456e-05,
      "loss": 0.1426,
      "step": 2050
    },
    {
      "epoch": 14.182444061962133,
      "grad_norm": 3.3271358013153076,
      "learning_rate": 1.6091954022988507e-05,
      "loss": 0.1458,
      "step": 2060
    },
    {
      "epoch": 14.251290877796903,
      "grad_norm": 3.017382860183716,
      "learning_rate": 1.5900383141762454e-05,
      "loss": 0.1576,
      "step": 2070
    },
    {
      "epoch": 14.32013769363167,
      "grad_norm": 3.2462213039398193,
      "learning_rate": 1.5708812260536398e-05,
      "loss": 0.1438,
      "step": 2080
    },
    {
      "epoch": 14.388984509466438,
      "grad_norm": 3.2721612453460693,
      "learning_rate": 1.5517241379310346e-05,
      "loss": 0.1632,
      "step": 2090
    },
    {
      "epoch": 14.457831325301205,
      "grad_norm": 2.183432102203369,
      "learning_rate": 1.532567049808429e-05,
      "loss": 0.1071,
      "step": 2100
    },
    {
      "epoch": 14.526678141135973,
      "grad_norm": 2.744126319885254,
      "learning_rate": 1.5134099616858237e-05,
      "loss": 0.1234,
      "step": 2110
    },
    {
      "epoch": 14.59552495697074,
      "grad_norm": 1.959697961807251,
      "learning_rate": 1.4942528735632185e-05,
      "loss": 0.1224,
      "step": 2120
    },
    {
      "epoch": 14.664371772805508,
      "grad_norm": 3.4430899620056152,
      "learning_rate": 1.475095785440613e-05,
      "loss": 0.1174,
      "step": 2130
    },
    {
      "epoch": 14.733218588640275,
      "grad_norm": 4.045992374420166,
      "learning_rate": 1.4559386973180078e-05,
      "loss": 0.1315,
      "step": 2140
    },
    {
      "epoch": 14.802065404475043,
      "grad_norm": 2.5135693550109863,
      "learning_rate": 1.4367816091954022e-05,
      "loss": 0.111,
      "step": 2150
    },
    {
      "epoch": 14.87091222030981,
      "grad_norm": 3.5096664428710938,
      "learning_rate": 1.417624521072797e-05,
      "loss": 0.1429,
      "step": 2160
    },
    {
      "epoch": 14.939759036144578,
      "grad_norm": 2.6633498668670654,
      "learning_rate": 1.3984674329501916e-05,
      "loss": 0.1378,
      "step": 2170
    },
    {
      "epoch": 14.994836488812393,
      "eval_accuracy": 0.9239341085271318,
      "eval_loss": 0.2173481285572052,
      "eval_runtime": 7.5899,
      "eval_samples_per_second": 271.94,
      "eval_steps_per_second": 8.564,
      "step": 2178
    },
    {
      "epoch": 15.008605851979347,
      "grad_norm": 2.4078171253204346,
      "learning_rate": 1.3793103448275863e-05,
      "loss": 0.1133,
      "step": 2180
    },
    {
      "epoch": 15.077452667814114,
      "grad_norm": 3.3953912258148193,
      "learning_rate": 1.360153256704981e-05,
      "loss": 0.1486,
      "step": 2190
    },
    {
      "epoch": 15.146299483648882,
      "grad_norm": 2.9477121829986572,
      "learning_rate": 1.3409961685823755e-05,
      "loss": 0.1097,
      "step": 2200
    },
    {
      "epoch": 15.21514629948365,
      "grad_norm": 2.8032114505767822,
      "learning_rate": 1.3218390804597702e-05,
      "loss": 0.136,
      "step": 2210
    },
    {
      "epoch": 15.283993115318417,
      "grad_norm": 3.5413339138031006,
      "learning_rate": 1.3026819923371647e-05,
      "loss": 0.1434,
      "step": 2220
    },
    {
      "epoch": 15.352839931153184,
      "grad_norm": 2.4817466735839844,
      "learning_rate": 1.2835249042145594e-05,
      "loss": 0.1178,
      "step": 2230
    },
    {
      "epoch": 15.421686746987952,
      "grad_norm": 2.3475193977355957,
      "learning_rate": 1.2643678160919542e-05,
      "loss": 0.0938,
      "step": 2240
    },
    {
      "epoch": 15.49053356282272,
      "grad_norm": 3.2757270336151123,
      "learning_rate": 1.2452107279693487e-05,
      "loss": 0.159,
      "step": 2250
    },
    {
      "epoch": 15.559380378657487,
      "grad_norm": 2.7539010047912598,
      "learning_rate": 1.2260536398467433e-05,
      "loss": 0.1351,
      "step": 2260
    },
    {
      "epoch": 15.628227194492254,
      "grad_norm": 2.8831663131713867,
      "learning_rate": 1.206896551724138e-05,
      "loss": 0.1341,
      "step": 2270
    },
    {
      "epoch": 15.697074010327022,
      "grad_norm": 3.276233434677124,
      "learning_rate": 1.1877394636015327e-05,
      "loss": 0.1348,
      "step": 2280
    },
    {
      "epoch": 15.76592082616179,
      "grad_norm": 2.2141618728637695,
      "learning_rate": 1.1685823754789272e-05,
      "loss": 0.1166,
      "step": 2290
    },
    {
      "epoch": 15.834767641996558,
      "grad_norm": 2.0663790702819824,
      "learning_rate": 1.1494252873563218e-05,
      "loss": 0.0998,
      "step": 2300
    },
    {
      "epoch": 15.903614457831326,
      "grad_norm": 2.9078729152679443,
      "learning_rate": 1.1302681992337164e-05,
      "loss": 0.1267,
      "step": 2310
    },
    {
      "epoch": 15.972461273666093,
      "grad_norm": 2.513744831085205,
      "learning_rate": 1.1111111111111112e-05,
      "loss": 0.1249,
      "step": 2320
    },
    {
      "epoch": 16.0,
      "eval_accuracy": 0.9186046511627907,
      "eval_loss": 0.24669910967350006,
      "eval_runtime": 7.8261,
      "eval_samples_per_second": 263.732,
      "eval_steps_per_second": 8.306,
      "step": 2324
    },
    {
      "epoch": 16.04130808950086,
      "grad_norm": 2.510911226272583,
      "learning_rate": 1.091954022988506e-05,
      "loss": 0.1052,
      "step": 2330
    },
    {
      "epoch": 16.110154905335627,
      "grad_norm": 2.5557456016540527,
      "learning_rate": 1.0727969348659005e-05,
      "loss": 0.1293,
      "step": 2340
    },
    {
      "epoch": 16.179001721170398,
      "grad_norm": 2.11968994140625,
      "learning_rate": 1.053639846743295e-05,
      "loss": 0.1139,
      "step": 2350
    },
    {
      "epoch": 16.247848537005165,
      "grad_norm": 2.4394736289978027,
      "learning_rate": 1.0344827586206897e-05,
      "loss": 0.1206,
      "step": 2360
    },
    {
      "epoch": 16.316695352839933,
      "grad_norm": 2.758610963821411,
      "learning_rate": 1.0153256704980842e-05,
      "loss": 0.1098,
      "step": 2370
    },
    {
      "epoch": 16.3855421686747,
      "grad_norm": 2.410712480545044,
      "learning_rate": 9.96168582375479e-06,
      "loss": 0.1052,
      "step": 2380
    },
    {
      "epoch": 16.454388984509468,
      "grad_norm": 2.830049991607666,
      "learning_rate": 9.770114942528738e-06,
      "loss": 0.11,
      "step": 2390
    },
    {
      "epoch": 16.523235800344235,
      "grad_norm": 2.921481132507324,
      "learning_rate": 9.578544061302683e-06,
      "loss": 0.1038,
      "step": 2400
    },
    {
      "epoch": 16.592082616179002,
      "grad_norm": 3.5350148677825928,
      "learning_rate": 9.386973180076629e-06,
      "loss": 0.1447,
      "step": 2410
    },
    {
      "epoch": 16.66092943201377,
      "grad_norm": 3.2687947750091553,
      "learning_rate": 9.195402298850575e-06,
      "loss": 0.119,
      "step": 2420
    },
    {
      "epoch": 16.729776247848537,
      "grad_norm": 2.843630790710449,
      "learning_rate": 9.00383141762452e-06,
      "loss": 0.1361,
      "step": 2430
    },
    {
      "epoch": 16.798623063683305,
      "grad_norm": 2.6418073177337646,
      "learning_rate": 8.812260536398467e-06,
      "loss": 0.1125,
      "step": 2440
    },
    {
      "epoch": 16.867469879518072,
      "grad_norm": 2.408013343811035,
      "learning_rate": 8.620689655172414e-06,
      "loss": 0.1354,
      "step": 2450
    },
    {
      "epoch": 16.93631669535284,
      "grad_norm": 3.4269073009490967,
      "learning_rate": 8.429118773946362e-06,
      "loss": 0.1504,
      "step": 2460
    },
    {
      "epoch": 16.998278829604132,
      "eval_accuracy": 0.9253875968992248,
      "eval_loss": 0.232245072722435,
      "eval_runtime": 7.9776,
      "eval_samples_per_second": 258.725,
      "eval_steps_per_second": 8.148,
      "step": 2469
    },
    {
      "epoch": 17.005163511187607,
      "grad_norm": 3.9189724922180176,
      "learning_rate": 8.237547892720307e-06,
      "loss": 0.1346,
      "step": 2470
    },
    {
      "epoch": 17.074010327022375,
      "grad_norm": 3.447214126586914,
      "learning_rate": 8.045977011494253e-06,
      "loss": 0.1254,
      "step": 2480
    },
    {
      "epoch": 17.142857142857142,
      "grad_norm": 1.5260127782821655,
      "learning_rate": 7.854406130268199e-06,
      "loss": 0.1264,
      "step": 2490
    },
    {
      "epoch": 17.21170395869191,
      "grad_norm": 2.7160017490386963,
      "learning_rate": 7.662835249042145e-06,
      "loss": 0.1124,
      "step": 2500
    },
    {
      "epoch": 17.280550774526677,
      "grad_norm": 2.530102491378784,
      "learning_rate": 7.4712643678160925e-06,
      "loss": 0.1116,
      "step": 2510
    },
    {
      "epoch": 17.349397590361445,
      "grad_norm": 2.009098768234253,
      "learning_rate": 7.279693486590039e-06,
      "loss": 0.0977,
      "step": 2520
    },
    {
      "epoch": 17.418244406196212,
      "grad_norm": 2.3196418285369873,
      "learning_rate": 7.088122605363985e-06,
      "loss": 0.1527,
      "step": 2530
    },
    {
      "epoch": 17.48709122203098,
      "grad_norm": 2.2051141262054443,
      "learning_rate": 6.896551724137932e-06,
      "loss": 0.1285,
      "step": 2540
    },
    {
      "epoch": 17.555938037865747,
      "grad_norm": 2.9338431358337402,
      "learning_rate": 6.7049808429118775e-06,
      "loss": 0.1182,
      "step": 2550
    },
    {
      "epoch": 17.624784853700515,
      "grad_norm": 2.8327407836914062,
      "learning_rate": 6.513409961685823e-06,
      "loss": 0.0985,
      "step": 2560
    },
    {
      "epoch": 17.693631669535286,
      "grad_norm": 2.8627865314483643,
      "learning_rate": 6.321839080459771e-06,
      "loss": 0.1185,
      "step": 2570
    },
    {
      "epoch": 17.762478485370053,
      "grad_norm": 2.3446600437164307,
      "learning_rate": 6.130268199233717e-06,
      "loss": 0.1024,
      "step": 2580
    },
    {
      "epoch": 17.83132530120482,
      "grad_norm": 3.2858073711395264,
      "learning_rate": 5.938697318007663e-06,
      "loss": 0.1213,
      "step": 2590
    },
    {
      "epoch": 17.900172117039588,
      "grad_norm": 2.4796440601348877,
      "learning_rate": 5.747126436781609e-06,
      "loss": 0.0974,
      "step": 2600
    },
    {
      "epoch": 17.969018932874356,
      "grad_norm": 1.4947516918182373,
      "learning_rate": 5.555555555555556e-06,
      "loss": 0.0972,
      "step": 2610
    },
    {
      "epoch": 17.99655765920826,
      "eval_accuracy": 0.9781976744186046,
      "eval_loss": 0.08412329852581024,
      "eval_runtime": 8.8996,
      "eval_samples_per_second": 231.921,
      "eval_steps_per_second": 7.304,
      "step": 2614
    },
    {
      "epoch": 18.037865748709123,
      "grad_norm": 2.7059268951416016,
      "learning_rate": 5.3639846743295025e-06,
      "loss": 0.1074,
      "step": 2620
    },
    {
      "epoch": 18.10671256454389,
      "grad_norm": 2.0837604999542236,
      "learning_rate": 5.172413793103448e-06,
      "loss": 0.106,
      "step": 2630
    },
    {
      "epoch": 18.175559380378658,
      "grad_norm": 3.52249813079834,
      "learning_rate": 4.980842911877395e-06,
      "loss": 0.1475,
      "step": 2640
    },
    {
      "epoch": 18.244406196213426,
      "grad_norm": 2.8017690181732178,
      "learning_rate": 4.789272030651342e-06,
      "loss": 0.128,
      "step": 2650
    },
    {
      "epoch": 18.313253012048193,
      "grad_norm": 1.7369544506072998,
      "learning_rate": 4.5977011494252875e-06,
      "loss": 0.0977,
      "step": 2660
    },
    {
      "epoch": 18.38209982788296,
      "grad_norm": 2.63714599609375,
      "learning_rate": 4.406130268199233e-06,
      "loss": 0.1067,
      "step": 2670
    },
    {
      "epoch": 18.450946643717728,
      "grad_norm": 2.7819557189941406,
      "learning_rate": 4.214559386973181e-06,
      "loss": 0.1364,
      "step": 2680
    },
    {
      "epoch": 18.519793459552496,
      "grad_norm": 2.259345769882202,
      "learning_rate": 4.022988505747127e-06,
      "loss": 0.0957,
      "step": 2690
    },
    {
      "epoch": 18.588640275387263,
      "grad_norm": 2.4775497913360596,
      "learning_rate": 3.8314176245210725e-06,
      "loss": 0.1273,
      "step": 2700
    },
    {
      "epoch": 18.65748709122203,
      "grad_norm": 2.889547824859619,
      "learning_rate": 3.6398467432950196e-06,
      "loss": 0.1293,
      "step": 2710
    },
    {
      "epoch": 18.726333907056798,
      "grad_norm": 2.8670730590820312,
      "learning_rate": 3.448275862068966e-06,
      "loss": 0.1279,
      "step": 2720
    },
    {
      "epoch": 18.795180722891565,
      "grad_norm": 1.9103004932403564,
      "learning_rate": 3.2567049808429117e-06,
      "loss": 0.1292,
      "step": 2730
    },
    {
      "epoch": 18.864027538726333,
      "grad_norm": 1.6978858709335327,
      "learning_rate": 3.0651340996168583e-06,
      "loss": 0.1182,
      "step": 2740
    },
    {
      "epoch": 18.9328743545611,
      "grad_norm": 3.6564149856567383,
      "learning_rate": 2.8735632183908046e-06,
      "loss": 0.1293,
      "step": 2750
    },
    {
      "epoch": 18.994836488812393,
      "eval_accuracy": 0.9467054263565892,
      "eval_loss": 0.15116389095783234,
      "eval_runtime": 7.8213,
      "eval_samples_per_second": 263.893,
      "eval_steps_per_second": 8.311,
      "step": 2759
    },
    {
      "epoch": 19.001721170395868,
      "grad_norm": 3.7127535343170166,
      "learning_rate": 2.6819923371647512e-06,
      "loss": 0.1027,
      "step": 2760
    },
    {
      "epoch": 19.070567986230635,
      "grad_norm": 2.170724868774414,
      "learning_rate": 2.4904214559386975e-06,
      "loss": 0.0993,
      "step": 2770
    },
    {
      "epoch": 19.139414802065403,
      "grad_norm": 3.131479501724243,
      "learning_rate": 2.2988505747126437e-06,
      "loss": 0.1378,
      "step": 2780
    },
    {
      "epoch": 19.20826161790017,
      "grad_norm": 1.942002534866333,
      "learning_rate": 2.1072796934865904e-06,
      "loss": 0.1163,
      "step": 2790
    },
    {
      "epoch": 19.27710843373494,
      "grad_norm": 2.8317081928253174,
      "learning_rate": 1.9157088122605362e-06,
      "loss": 0.1467,
      "step": 2800
    },
    {
      "epoch": 19.34595524956971,
      "grad_norm": 1.791185736656189,
      "learning_rate": 1.724137931034483e-06,
      "loss": 0.1139,
      "step": 2810
    },
    {
      "epoch": 19.414802065404476,
      "grad_norm": 2.639554023742676,
      "learning_rate": 1.5325670498084292e-06,
      "loss": 0.1139,
      "step": 2820
    },
    {
      "epoch": 19.483648881239244,
      "grad_norm": 1.9929583072662354,
      "learning_rate": 1.3409961685823756e-06,
      "loss": 0.1093,
      "step": 2830
    },
    {
      "epoch": 19.55249569707401,
      "grad_norm": 2.7852931022644043,
      "learning_rate": 1.1494252873563219e-06,
      "loss": 0.1305,
      "step": 2840
    },
    {
      "epoch": 19.62134251290878,
      "grad_norm": 3.228508710861206,
      "learning_rate": 9.578544061302681e-07,
      "loss": 0.1236,
      "step": 2850
    },
    {
      "epoch": 19.690189328743546,
      "grad_norm": 3.5496957302093506,
      "learning_rate": 7.662835249042146e-07,
      "loss": 0.1357,
      "step": 2860
    },
    {
      "epoch": 19.759036144578314,
      "grad_norm": 1.8435531854629517,
      "learning_rate": 5.747126436781609e-07,
      "loss": 0.1134,
      "step": 2870
    },
    {
      "epoch": 19.82788296041308,
      "grad_norm": 3.3433897495269775,
      "learning_rate": 3.831417624521073e-07,
      "loss": 0.1472,
      "step": 2880
    },
    {
      "epoch": 19.89672977624785,
      "grad_norm": 2.541207790374756,
      "learning_rate": 1.9157088122605365e-07,
      "loss": 0.1057,
      "step": 2890
    },
    {
      "epoch": 19.965576592082616,
      "grad_norm": 3.152078151702881,
      "learning_rate": 0.0,
      "loss": 0.1072,
      "step": 2900
    },
    {
      "epoch": 19.965576592082616,
      "eval_accuracy": 0.9447674418604651,
      "eval_loss": 0.16629785299301147,
      "eval_runtime": 7.3483,
      "eval_samples_per_second": 280.88,
      "eval_steps_per_second": 8.846,
      "step": 2900
    },
    {
      "epoch": 19.965576592082616,
      "step": 2900,
      "total_flos": 9.813099581032366e+17,
      "train_loss": 0.3346556580683281,
      "train_runtime": 3422.7405,
      "train_samples_per_second": 108.533,
      "train_steps_per_second": 0.847
    }
  ],
  "logging_steps": 10,
  "max_steps": 2900,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 20,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 9.813099581032366e+17,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": null
}