{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.07996801279488205,
  "eval_steps": 9,
  "global_step": 100,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0007996801279488205,
      "grad_norm": 6.359498977661133,
      "learning_rate": 1e-05,
      "loss": 7.5787,
      "step": 1
    },
    {
      "epoch": 0.0007996801279488205,
      "eval_loss": 7.8461995124816895,
      "eval_runtime": 65.4027,
      "eval_samples_per_second": 16.1,
      "eval_steps_per_second": 2.018,
      "step": 1
    },
    {
      "epoch": 0.001599360255897641,
      "grad_norm": 6.157934188842773,
      "learning_rate": 2e-05,
      "loss": 7.6096,
      "step": 2
    },
    {
      "epoch": 0.0023990403838464614,
      "grad_norm": 5.621501445770264,
      "learning_rate": 3e-05,
      "loss": 7.8058,
      "step": 3
    },
    {
      "epoch": 0.003198720511795282,
      "grad_norm": 6.5738677978515625,
      "learning_rate": 4e-05,
      "loss": 8.0013,
      "step": 4
    },
    {
      "epoch": 0.003998400639744102,
      "grad_norm": 6.065139293670654,
      "learning_rate": 5e-05,
      "loss": 7.1979,
      "step": 5
    },
    {
      "epoch": 0.004798080767692923,
      "grad_norm": 6.060821533203125,
      "learning_rate": 6e-05,
      "loss": 6.8884,
      "step": 6
    },
    {
      "epoch": 0.005597760895641743,
      "grad_norm": 6.489013195037842,
      "learning_rate": 7e-05,
      "loss": 7.6614,
      "step": 7
    },
    {
      "epoch": 0.006397441023590564,
      "grad_norm": 7.009871959686279,
      "learning_rate": 8e-05,
      "loss": 7.0684,
      "step": 8
    },
    {
      "epoch": 0.007197121151539384,
      "grad_norm": 5.987380504608154,
      "learning_rate": 9e-05,
      "loss": 6.6335,
      "step": 9
    },
    {
      "epoch": 0.007197121151539384,
      "eval_loss": 6.301669597625732,
      "eval_runtime": 64.8317,
      "eval_samples_per_second": 16.242,
      "eval_steps_per_second": 2.036,
      "step": 9
    },
    {
      "epoch": 0.007996801279488205,
      "grad_norm": 7.33066463470459,
      "learning_rate": 0.0001,
      "loss": 6.4759,
      "step": 10
    },
    {
      "epoch": 0.008796481407437025,
      "grad_norm": 7.483557224273682,
      "learning_rate": 9.99695413509548e-05,
      "loss": 6.0371,
      "step": 11
    },
    {
      "epoch": 0.009596161535385846,
      "grad_norm": 5.732779026031494,
      "learning_rate": 9.987820251299122e-05,
      "loss": 5.0003,
      "step": 12
    },
    {
      "epoch": 0.010395841663334666,
      "grad_norm": 6.240601062774658,
      "learning_rate": 9.972609476841367e-05,
      "loss": 4.9242,
      "step": 13
    },
    {
      "epoch": 0.011195521791283487,
      "grad_norm": 6.761163234710693,
      "learning_rate": 9.951340343707852e-05,
      "loss": 4.9644,
      "step": 14
    },
    {
      "epoch": 0.011995201919232307,
      "grad_norm": 7.382585048675537,
      "learning_rate": 9.924038765061042e-05,
      "loss": 4.677,
      "step": 15
    },
    {
      "epoch": 0.012794882047181128,
      "grad_norm": 8.166483879089355,
      "learning_rate": 9.890738003669029e-05,
      "loss": 4.6715,
      "step": 16
    },
    {
      "epoch": 0.013594562175129948,
      "grad_norm": 8.002703666687012,
      "learning_rate": 9.851478631379982e-05,
      "loss": 4.3609,
      "step": 17
    },
    {
      "epoch": 0.014394242303078768,
      "grad_norm": 6.686599254608154,
      "learning_rate": 9.806308479691595e-05,
      "loss": 4.4421,
      "step": 18
    },
    {
      "epoch": 0.014394242303078768,
      "eval_loss": 4.342203140258789,
      "eval_runtime": 64.9139,
      "eval_samples_per_second": 16.221,
      "eval_steps_per_second": 2.033,
      "step": 18
    },
    {
      "epoch": 0.015193922431027589,
      "grad_norm": 6.648100852966309,
      "learning_rate": 9.755282581475769e-05,
      "loss": 4.4866,
      "step": 19
    },
    {
      "epoch": 0.01599360255897641,
      "grad_norm": 5.807915210723877,
      "learning_rate": 9.698463103929542e-05,
      "loss": 4.2142,
      "step": 20
    },
    {
      "epoch": 0.01679328268692523,
      "grad_norm": 5.9541096687316895,
      "learning_rate": 9.635919272833938e-05,
      "loss": 4.485,
      "step": 21
    },
    {
      "epoch": 0.01759296281487405,
      "grad_norm": 6.071778297424316,
      "learning_rate": 9.567727288213005e-05,
      "loss": 4.1178,
      "step": 22
    },
    {
      "epoch": 0.018392642942822873,
      "grad_norm": 5.7872185707092285,
      "learning_rate": 9.493970231495835e-05,
      "loss": 4.4135,
      "step": 23
    },
    {
      "epoch": 0.01919232307077169,
      "grad_norm": 6.7662858963012695,
      "learning_rate": 9.414737964294636e-05,
      "loss": 4.3118,
      "step": 24
    },
    {
      "epoch": 0.019992003198720514,
      "grad_norm": 5.508387088775635,
      "learning_rate": 9.330127018922194e-05,
      "loss": 4.1724,
      "step": 25
    },
    {
      "epoch": 0.020791683326669332,
      "grad_norm": 5.747828006744385,
      "learning_rate": 9.24024048078213e-05,
      "loss": 4.3605,
      "step": 26
    },
    {
      "epoch": 0.021591363454618154,
      "grad_norm": 6.477874279022217,
      "learning_rate": 9.145187862775209e-05,
      "loss": 4.1249,
      "step": 27
    },
    {
      "epoch": 0.021591363454618154,
      "eval_loss": 4.122183322906494,
      "eval_runtime": 65.0038,
      "eval_samples_per_second": 16.199,
      "eval_steps_per_second": 2.031,
      "step": 27
    },
    {
      "epoch": 0.022391043582566973,
      "grad_norm": 5.405932426452637,
      "learning_rate": 9.045084971874738e-05,
      "loss": 4.0658,
      "step": 28
    },
    {
      "epoch": 0.023190723710515795,
      "grad_norm": 6.421638488769531,
      "learning_rate": 8.940053768033609e-05,
      "loss": 4.4341,
      "step": 29
    },
    {
      "epoch": 0.023990403838464614,
      "grad_norm": 5.2888383865356445,
      "learning_rate": 8.83022221559489e-05,
      "loss": 4.1154,
      "step": 30
    },
    {
      "epoch": 0.024790083966413436,
      "grad_norm": 5.253130912780762,
      "learning_rate": 8.715724127386972e-05,
      "loss": 3.9418,
      "step": 31
    },
    {
      "epoch": 0.025589764094362255,
      "grad_norm": 4.973030090332031,
      "learning_rate": 8.596699001693255e-05,
      "loss": 3.9959,
      "step": 32
    },
    {
      "epoch": 0.026389444222311077,
      "grad_norm": 6.877074718475342,
      "learning_rate": 8.473291852294987e-05,
      "loss": 4.397,
      "step": 33
    },
    {
      "epoch": 0.027189124350259896,
      "grad_norm": 6.104552745819092,
      "learning_rate": 8.345653031794292e-05,
      "loss": 4.1429,
      "step": 34
    },
    {
      "epoch": 0.027988804478208718,
      "grad_norm": 6.565894603729248,
      "learning_rate": 8.213938048432697e-05,
      "loss": 3.7672,
      "step": 35
    },
    {
      "epoch": 0.028788484606157537,
      "grad_norm": 6.402747631072998,
      "learning_rate": 8.07830737662829e-05,
      "loss": 3.7757,
      "step": 36
    },
    {
      "epoch": 0.028788484606157537,
      "eval_loss": 3.980862855911255,
      "eval_runtime": 64.8672,
      "eval_samples_per_second": 16.233,
      "eval_steps_per_second": 2.035,
      "step": 36
    },
    {
      "epoch": 0.02958816473410636,
      "grad_norm": 5.338390827178955,
      "learning_rate": 7.938926261462366e-05,
      "loss": 3.7549,
      "step": 37
    },
    {
      "epoch": 0.030387844862055178,
      "grad_norm": 6.575376033782959,
      "learning_rate": 7.795964517353735e-05,
      "loss": 3.8059,
      "step": 38
    },
    {
      "epoch": 0.031187524990004,
      "grad_norm": 6.826125621795654,
      "learning_rate": 7.649596321166024e-05,
      "loss": 4.0356,
      "step": 39
    },
    {
      "epoch": 0.03198720511795282,
      "grad_norm": 6.553135395050049,
      "learning_rate": 7.500000000000001e-05,
      "loss": 3.7619,
      "step": 40
    },
    {
      "epoch": 0.03278688524590164,
      "grad_norm": 7.697059631347656,
      "learning_rate": 7.347357813929454e-05,
      "loss": 3.9946,
      "step": 41
    },
    {
      "epoch": 0.03358656537385046,
      "grad_norm": 5.825917720794678,
      "learning_rate": 7.191855733945387e-05,
      "loss": 3.9097,
      "step": 42
    },
    {
      "epoch": 0.03438624550179928,
      "grad_norm": 5.886120796203613,
      "learning_rate": 7.033683215379002e-05,
      "loss": 3.6738,
      "step": 43
    },
    {
      "epoch": 0.0351859256297481,
      "grad_norm": 6.294511795043945,
      "learning_rate": 6.873032967079561e-05,
      "loss": 4.0373,
      "step": 44
    },
    {
      "epoch": 0.03598560575769692,
      "grad_norm": 6.388889789581299,
      "learning_rate": 6.710100716628344e-05,
      "loss": 3.7921,
      "step": 45
    },
    {
      "epoch": 0.03598560575769692,
      "eval_loss": 3.93093204498291,
      "eval_runtime": 64.8235,
      "eval_samples_per_second": 16.244,
      "eval_steps_per_second": 2.036,
      "step": 45
    },
    {
      "epoch": 0.036785285885645745,
      "grad_norm": 5.413031578063965,
      "learning_rate": 6.545084971874738e-05,
      "loss": 3.8995,
      "step": 46
    },
    {
      "epoch": 0.03758496601359456,
      "grad_norm": 4.84060525894165,
      "learning_rate": 6.378186779084995e-05,
      "loss": 3.9409,
      "step": 47
    },
    {
      "epoch": 0.03838464614154338,
      "grad_norm": 5.393630504608154,
      "learning_rate": 6.209609477998338e-05,
      "loss": 3.8495,
      "step": 48
    },
    {
      "epoch": 0.039184326269492205,
      "grad_norm": 5.282654762268066,
      "learning_rate": 6.0395584540887963e-05,
      "loss": 3.7573,
      "step": 49
    },
    {
      "epoch": 0.03998400639744103,
      "grad_norm": 4.579346179962158,
      "learning_rate": 5.868240888334653e-05,
      "loss": 4.0155,
      "step": 50
    },
    {
      "epoch": 0.04078368652538984,
      "grad_norm": 5.972344875335693,
      "learning_rate": 5.695865504800327e-05,
      "loss": 3.9595,
      "step": 51
    },
    {
      "epoch": 0.041583366653338664,
      "grad_norm": 6.893924713134766,
      "learning_rate": 5.522642316338268e-05,
      "loss": 3.9452,
      "step": 52
    },
    {
      "epoch": 0.04238304678128749,
      "grad_norm": 5.096782684326172,
      "learning_rate": 5.348782368720626e-05,
      "loss": 3.5734,
      "step": 53
    },
    {
      "epoch": 0.04318272690923631,
      "grad_norm": 5.061729431152344,
      "learning_rate": 5.174497483512506e-05,
      "loss": 3.52,
      "step": 54
    },
    {
      "epoch": 0.04318272690923631,
      "eval_loss": 3.8698742389678955,
      "eval_runtime": 64.9487,
      "eval_samples_per_second": 16.213,
      "eval_steps_per_second": 2.032,
      "step": 54
    },
    {
      "epoch": 0.043982407037185124,
      "grad_norm": 5.499988079071045,
      "learning_rate": 5e-05,
      "loss": 4.0566,
      "step": 55
    },
    {
      "epoch": 0.044782087165133946,
      "grad_norm": 5.24320125579834,
      "learning_rate": 4.825502516487497e-05,
      "loss": 3.736,
      "step": 56
    },
    {
      "epoch": 0.04558176729308277,
      "grad_norm": 4.890809535980225,
      "learning_rate": 4.6512176312793736e-05,
      "loss": 3.827,
      "step": 57
    },
    {
      "epoch": 0.04638144742103159,
      "grad_norm": 5.598406791687012,
      "learning_rate": 4.477357683661734e-05,
      "loss": 3.5459,
      "step": 58
    },
    {
      "epoch": 0.047181127548980406,
      "grad_norm": 5.470479965209961,
      "learning_rate": 4.3041344951996746e-05,
      "loss": 3.7696,
      "step": 59
    },
    {
      "epoch": 0.04798080767692923,
      "grad_norm": 6.037547588348389,
      "learning_rate": 4.131759111665349e-05,
      "loss": 4.1592,
      "step": 60
    },
    {
      "epoch": 0.04878048780487805,
      "grad_norm": 5.132472038269043,
      "learning_rate": 3.960441545911204e-05,
      "loss": 3.7845,
      "step": 61
    },
    {
      "epoch": 0.04958016793282687,
      "grad_norm": 6.724290370941162,
      "learning_rate": 3.790390522001662e-05,
      "loss": 3.7481,
      "step": 62
    },
    {
      "epoch": 0.05037984806077569,
      "grad_norm": 4.897818565368652,
      "learning_rate": 3.6218132209150045e-05,
      "loss": 3.8861,
      "step": 63
    },
    {
      "epoch": 0.05037984806077569,
      "eval_loss": 3.841887950897217,
      "eval_runtime": 64.8922,
      "eval_samples_per_second": 16.227,
      "eval_steps_per_second": 2.034,
      "step": 63
    },
    {
      "epoch": 0.05117952818872451,
      "grad_norm": 5.615442276000977,
      "learning_rate": 3.4549150281252636e-05,
      "loss": 4.1557,
      "step": 64
    },
    {
      "epoch": 0.05197920831667333,
      "grad_norm": 5.482508182525635,
      "learning_rate": 3.289899283371657e-05,
      "loss": 4.2472,
      "step": 65
    },
    {
      "epoch": 0.052778888444622155,
      "grad_norm": 8.17457103729248,
      "learning_rate": 3.12696703292044e-05,
      "loss": 3.8963,
      "step": 66
    },
    {
      "epoch": 0.05357856857257097,
      "grad_norm": 6.345828533172607,
      "learning_rate": 2.9663167846209998e-05,
      "loss": 4.0517,
      "step": 67
    },
    {
      "epoch": 0.05437824870051979,
      "grad_norm": 4.803496360778809,
      "learning_rate": 2.8081442660546125e-05,
      "loss": 3.5726,
      "step": 68
    },
    {
      "epoch": 0.055177928828468614,
      "grad_norm": 4.9716997146606445,
      "learning_rate": 2.6526421860705473e-05,
      "loss": 3.7366,
      "step": 69
    },
    {
      "epoch": 0.055977608956417436,
      "grad_norm": 5.260076999664307,
      "learning_rate": 2.500000000000001e-05,
      "loss": 3.8737,
      "step": 70
    },
    {
      "epoch": 0.05677728908436625,
      "grad_norm": 5.536251068115234,
      "learning_rate": 2.350403678833976e-05,
      "loss": 3.7522,
      "step": 71
    },
    {
      "epoch": 0.057576969212315074,
      "grad_norm": 4.966779708862305,
      "learning_rate": 2.2040354826462668e-05,
      "loss": 3.9975,
      "step": 72
    },
    {
      "epoch": 0.057576969212315074,
      "eval_loss": 3.810777187347412,
      "eval_runtime": 64.8369,
      "eval_samples_per_second": 16.241,
      "eval_steps_per_second": 2.036,
      "step": 72
    },
    {
      "epoch": 0.058376649340263896,
      "grad_norm": 5.360210418701172,
      "learning_rate": 2.061073738537635e-05,
      "loss": 3.4565,
      "step": 73
    },
    {
      "epoch": 0.05917632946821272,
      "grad_norm": 5.443131446838379,
      "learning_rate": 1.9216926233717085e-05,
      "loss": 4.1542,
      "step": 74
    },
    {
      "epoch": 0.059976009596161534,
      "grad_norm": 5.148866653442383,
      "learning_rate": 1.7860619515673033e-05,
      "loss": 3.8738,
      "step": 75
    },
    {
      "epoch": 0.060775689724110356,
      "grad_norm": 5.219843864440918,
      "learning_rate": 1.6543469682057106e-05,
      "loss": 3.8506,
      "step": 76
    },
    {
      "epoch": 0.06157536985205918,
      "grad_norm": 4.833649635314941,
      "learning_rate": 1.526708147705013e-05,
      "loss": 3.6357,
      "step": 77
    },
    {
      "epoch": 0.062375049980008,
      "grad_norm": 5.183854103088379,
      "learning_rate": 1.4033009983067452e-05,
      "loss": 3.8796,
      "step": 78
    },
    {
      "epoch": 0.06317473010795682,
      "grad_norm": 4.822139263153076,
      "learning_rate": 1.2842758726130283e-05,
      "loss": 3.8039,
      "step": 79
    },
    {
      "epoch": 0.06397441023590564,
      "grad_norm": 5.325417518615723,
      "learning_rate": 1.1697777844051105e-05,
      "loss": 3.7121,
      "step": 80
    },
    {
      "epoch": 0.06477409036385445,
      "grad_norm": 5.68792200088501,
      "learning_rate": 1.0599462319663905e-05,
      "loss": 3.8676,
      "step": 81
    },
    {
      "epoch": 0.06477409036385445,
      "eval_loss": 3.790825605392456,
      "eval_runtime": 64.9998,
      "eval_samples_per_second": 16.2,
      "eval_steps_per_second": 2.031,
      "step": 81
    },
    {
      "epoch": 0.06557377049180328,
      "grad_norm": 4.972930908203125,
      "learning_rate": 9.549150281252633e-06,
      "loss": 3.8641,
      "step": 82
    },
    {
      "epoch": 0.0663734506197521,
      "grad_norm": 5.177651882171631,
      "learning_rate": 8.548121372247918e-06,
      "loss": 3.9937,
      "step": 83
    },
    {
      "epoch": 0.06717313074770093,
      "grad_norm": 5.202006816864014,
      "learning_rate": 7.597595192178702e-06,
      "loss": 3.8965,
      "step": 84
    },
    {
      "epoch": 0.06797281087564974,
      "grad_norm": 5.003640174865723,
      "learning_rate": 6.698729810778065e-06,
      "loss": 3.6745,
      "step": 85
    },
    {
      "epoch": 0.06877249100359856,
      "grad_norm": 5.02064847946167,
      "learning_rate": 5.852620357053651e-06,
      "loss": 4.0451,
      "step": 86
    },
    {
      "epoch": 0.06957217113154739,
      "grad_norm": 6.714007377624512,
      "learning_rate": 5.060297685041659e-06,
      "loss": 3.5304,
      "step": 87
    },
    {
      "epoch": 0.0703718512594962,
      "grad_norm": 4.566380500793457,
      "learning_rate": 4.322727117869951e-06,
      "loss": 3.7025,
      "step": 88
    },
    {
      "epoch": 0.07117153138744502,
      "grad_norm": 4.762030601501465,
      "learning_rate": 3.6408072716606346e-06,
      "loss": 3.5985,
      "step": 89
    },
    {
      "epoch": 0.07197121151539385,
      "grad_norm": 4.6840314865112305,
      "learning_rate": 3.0153689607045845e-06,
      "loss": 3.619,
      "step": 90
    },
    {
      "epoch": 0.07197121151539385,
      "eval_loss": 3.782498598098755,
      "eval_runtime": 64.88,
      "eval_samples_per_second": 16.23,
      "eval_steps_per_second": 2.035,
      "step": 90
    },
    {
      "epoch": 0.07277089164334266,
      "grad_norm": 5.267907619476318,
      "learning_rate": 2.4471741852423237e-06,
      "loss": 3.6987,
      "step": 91
    },
    {
      "epoch": 0.07357057177129149,
      "grad_norm": 4.929231643676758,
      "learning_rate": 1.9369152030840556e-06,
      "loss": 3.5549,
      "step": 92
    },
    {
      "epoch": 0.0743702518992403,
      "grad_norm": 4.731836795806885,
      "learning_rate": 1.4852136862001764e-06,
      "loss": 3.7003,
      "step": 93
    },
    {
      "epoch": 0.07516993202718912,
      "grad_norm": 5.483896255493164,
      "learning_rate": 1.0926199633097157e-06,
      "loss": 3.7144,
      "step": 94
    },
    {
      "epoch": 0.07596961215513795,
      "grad_norm": 4.872249603271484,
      "learning_rate": 7.596123493895991e-07,
      "loss": 3.985,
      "step": 95
    },
    {
      "epoch": 0.07676929228308677,
      "grad_norm": 4.845506191253662,
      "learning_rate": 4.865965629214819e-07,
      "loss": 4.1145,
      "step": 96
    },
    {
      "epoch": 0.07756897241103558,
      "grad_norm": 5.09095573425293,
      "learning_rate": 2.7390523158633554e-07,
      "loss": 4.1055,
      "step": 97
    },
    {
      "epoch": 0.07836865253898441,
      "grad_norm": 5.559736728668213,
      "learning_rate": 1.2179748700879012e-07,
      "loss": 3.6654,
      "step": 98
    },
    {
      "epoch": 0.07916833266693322,
      "grad_norm": 5.160393714904785,
      "learning_rate": 3.04586490452119e-08,
      "loss": 3.4554,
      "step": 99
    },
    {
      "epoch": 0.07916833266693322,
      "eval_loss": 3.7806127071380615,
      "eval_runtime": 64.7968,
      "eval_samples_per_second": 16.251,
      "eval_steps_per_second": 2.037,
      "step": 99
    },
    {
      "epoch": 0.07996801279488205,
      "grad_norm": 5.4672346115112305,
      "learning_rate": 0.0,
      "loss": 3.8669,
      "step": 100
    }
  ],
  "logging_steps": 1,
  "max_steps": 100,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 7.41887686213632e+16,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}