{
  "best_metric": 2.6855108737945557,
  "best_model_checkpoint": "miner_id_24/checkpoint-100",
  "epoch": 0.014773776546629732,
  "eval_steps": 25,
  "global_step": 100,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.00014773776546629733,
      "grad_norm": 0.1680404245853424,
      "learning_rate": 4e-05,
      "loss": 2.2693,
      "step": 1
    },
    {
      "epoch": 0.00014773776546629733,
      "eval_loss": 2.8410422801971436,
      "eval_runtime": 245.1756,
      "eval_samples_per_second": 11.624,
      "eval_steps_per_second": 5.812,
      "step": 1
    },
    {
      "epoch": 0.00029547553093259467,
      "grad_norm": 0.24187016487121582,
      "learning_rate": 8e-05,
      "loss": 2.3835,
      "step": 2
    },
    {
      "epoch": 0.00044321329639889195,
      "grad_norm": 0.27124741673469543,
      "learning_rate": 0.00012,
      "loss": 2.5753,
      "step": 3
    },
    {
      "epoch": 0.0005909510618651893,
      "grad_norm": 0.32595115900039673,
      "learning_rate": 0.00016,
      "loss": 2.4737,
      "step": 4
    },
    {
      "epoch": 0.0007386888273314866,
      "grad_norm": 0.3274131715297699,
      "learning_rate": 0.0002,
      "loss": 2.5374,
      "step": 5
    },
    {
      "epoch": 0.0008864265927977839,
      "grad_norm": 0.31691431999206543,
      "learning_rate": 0.00019994532573409262,
      "loss": 2.6162,
      "step": 6
    },
    {
      "epoch": 0.0010341643582640813,
      "grad_norm": 0.35264068841934204,
      "learning_rate": 0.00019978136272187747,
      "loss": 2.5054,
      "step": 7
    },
    {
      "epoch": 0.0011819021237303787,
      "grad_norm": 0.39142340421676636,
      "learning_rate": 0.00019950829025450114,
      "loss": 2.565,
      "step": 8
    },
    {
      "epoch": 0.001329639889196676,
      "grad_norm": 0.3724002242088318,
      "learning_rate": 0.00019912640693269752,
      "loss": 2.5553,
      "step": 9
    },
    {
      "epoch": 0.0014773776546629731,
      "grad_norm": 0.5595511794090271,
      "learning_rate": 0.00019863613034027224,
      "loss": 2.5109,
      "step": 10
    },
    {
      "epoch": 0.0016251154201292705,
      "grad_norm": 0.4720703959465027,
      "learning_rate": 0.00019803799658748094,
      "loss": 2.6227,
      "step": 11
    },
    {
      "epoch": 0.0017728531855955678,
      "grad_norm": 0.428568571805954,
      "learning_rate": 0.0001973326597248006,
      "loss": 2.5642,
      "step": 12
    },
    {
      "epoch": 0.0019205909510618651,
      "grad_norm": 0.523729681968689,
      "learning_rate": 0.00019652089102773488,
      "loss": 2.5874,
      "step": 13
    },
    {
      "epoch": 0.0020683287165281627,
      "grad_norm": 0.48841235041618347,
      "learning_rate": 0.00019560357815343577,
      "loss": 2.4838,
      "step": 14
    },
    {
      "epoch": 0.00221606648199446,
      "grad_norm": 0.5847761034965515,
      "learning_rate": 0.00019458172417006347,
      "loss": 2.5853,
      "step": 15
    },
    {
      "epoch": 0.0023638042474607573,
      "grad_norm": 0.5334342122077942,
      "learning_rate": 0.0001934564464599461,
      "loss": 2.6511,
      "step": 16
    },
    {
      "epoch": 0.0025115420129270547,
      "grad_norm": 0.5539087057113647,
      "learning_rate": 0.00019222897549773848,
      "loss": 2.5072,
      "step": 17
    },
    {
      "epoch": 0.002659279778393352,
      "grad_norm": 0.5858405828475952,
      "learning_rate": 0.00019090065350491626,
      "loss": 2.7543,
      "step": 18
    },
    {
      "epoch": 0.002807017543859649,
      "grad_norm": 0.6383663415908813,
      "learning_rate": 0.00018947293298207635,
      "loss": 2.9209,
      "step": 19
    },
    {
      "epoch": 0.0029547553093259462,
      "grad_norm": 0.5604156851768494,
      "learning_rate": 0.0001879473751206489,
      "loss": 2.5698,
      "step": 20
    },
    {
      "epoch": 0.0031024930747922436,
      "grad_norm": 0.5957999229431152,
      "learning_rate": 0.00018632564809575742,
      "loss": 2.6138,
      "step": 21
    },
    {
      "epoch": 0.003250230840258541,
      "grad_norm": 0.7011343836784363,
      "learning_rate": 0.00018460952524209355,
      "loss": 2.6191,
      "step": 22
    },
    {
      "epoch": 0.0033979686057248383,
      "grad_norm": 0.6490035653114319,
      "learning_rate": 0.00018280088311480201,
      "loss": 2.7153,
      "step": 23
    },
    {
      "epoch": 0.0035457063711911356,
      "grad_norm": 0.7848305106163025,
      "learning_rate": 0.00018090169943749476,
      "loss": 2.6823,
      "step": 24
    },
    {
      "epoch": 0.003693444136657433,
      "grad_norm": 0.7075931429862976,
      "learning_rate": 0.00017891405093963938,
      "loss": 2.5126,
      "step": 25
    },
    {
      "epoch": 0.003693444136657433,
      "eval_loss": 2.715409517288208,
      "eval_runtime": 245.526,
      "eval_samples_per_second": 11.608,
      "eval_steps_per_second": 5.804,
      "step": 25
    },
    {
      "epoch": 0.0038411819021237303,
      "grad_norm": 0.8052982687950134,
      "learning_rate": 0.00017684011108568592,
      "loss": 2.8032,
      "step": 26
    },
    {
      "epoch": 0.003988919667590028,
      "grad_norm": 0.7346736192703247,
      "learning_rate": 0.0001746821476984154,
      "loss": 2.5539,
      "step": 27
    },
    {
      "epoch": 0.004136657433056325,
      "grad_norm": 0.7615510821342468,
      "learning_rate": 0.00017244252047910892,
      "loss": 2.7599,
      "step": 28
    },
    {
      "epoch": 0.004284395198522623,
      "grad_norm": 0.8807427883148193,
      "learning_rate": 0.00017012367842724887,
      "loss": 2.6514,
      "step": 29
    },
    {
      "epoch": 0.00443213296398892,
      "grad_norm": 0.8408799171447754,
      "learning_rate": 0.00016772815716257412,
      "loss": 2.823,
      "step": 30
    },
    {
      "epoch": 0.004579870729455217,
      "grad_norm": 0.9535984992980957,
      "learning_rate": 0.00016525857615241687,
      "loss": 2.7928,
      "step": 31
    },
    {
      "epoch": 0.004727608494921515,
      "grad_norm": 0.931502640247345,
      "learning_rate": 0.0001627176358473537,
      "loss": 3.0296,
      "step": 32
    },
    {
      "epoch": 0.004875346260387812,
      "grad_norm": 0.9972584247589111,
      "learning_rate": 0.00016010811472830252,
      "loss": 2.7615,
      "step": 33
    },
    {
      "epoch": 0.005023084025854109,
      "grad_norm": 0.9503821730613708,
      "learning_rate": 0.00015743286626829437,
      "loss": 2.9016,
      "step": 34
    },
    {
      "epoch": 0.005170821791320407,
      "grad_norm": 0.8864520788192749,
      "learning_rate": 0.00015469481581224272,
      "loss": 3.2727,
      "step": 35
    },
    {
      "epoch": 0.005318559556786704,
      "grad_norm": 0.7939389944076538,
      "learning_rate": 0.00015189695737812152,
      "loss": 2.6502,
      "step": 36
    },
    {
      "epoch": 0.0054662973222530005,
      "grad_norm": 0.8255647420883179,
      "learning_rate": 0.00014904235038305083,
      "loss": 3.0277,
      "step": 37
    },
    {
      "epoch": 0.005614035087719298,
      "grad_norm": 0.9084526896476746,
      "learning_rate": 0.0001461341162978688,
      "loss": 2.8495,
      "step": 38
    },
    {
      "epoch": 0.005761772853185595,
      "grad_norm": 0.8007035255432129,
      "learning_rate": 0.00014317543523384928,
      "loss": 2.9053,
      "step": 39
    },
    {
      "epoch": 0.0059095106186518925,
      "grad_norm": 0.8639392852783203,
      "learning_rate": 0.00014016954246529696,
      "loss": 2.804,
      "step": 40
    },
    {
      "epoch": 0.00605724838411819,
      "grad_norm": 0.9926638603210449,
      "learning_rate": 0.00013711972489182208,
      "loss": 2.8584,
      "step": 41
    },
    {
      "epoch": 0.006204986149584487,
      "grad_norm": 0.9736136198043823,
      "learning_rate": 0.00013402931744416433,
      "loss": 2.8866,
      "step": 42
    },
    {
      "epoch": 0.0063527239150507845,
      "grad_norm": 0.8373548984527588,
      "learning_rate": 0.00013090169943749476,
      "loss": 2.7566,
      "step": 43
    },
    {
      "epoch": 0.006500461680517082,
      "grad_norm": 0.8510177731513977,
      "learning_rate": 0.00012774029087618446,
      "loss": 2.8319,
      "step": 44
    },
    {
      "epoch": 0.006648199445983379,
      "grad_norm": 1.0112504959106445,
      "learning_rate": 0.00012454854871407994,
      "loss": 2.9611,
      "step": 45
    },
    {
      "epoch": 0.0067959372114496765,
      "grad_norm": 0.9748664498329163,
      "learning_rate": 0.0001213299630743747,
      "loss": 2.8568,
      "step": 46
    },
    {
      "epoch": 0.006943674976915974,
      "grad_norm": 0.935685396194458,
      "learning_rate": 0.000118088053433211,
      "loss": 2.8491,
      "step": 47
    },
    {
      "epoch": 0.007091412742382271,
      "grad_norm": 1.117539405822754,
      "learning_rate": 0.0001148263647711842,
      "loss": 3.2332,
      "step": 48
    },
    {
      "epoch": 0.0072391505078485685,
      "grad_norm": 1.5835413932800293,
      "learning_rate": 0.00011154846369695863,
      "loss": 2.9418,
      "step": 49
    },
    {
      "epoch": 0.007386888273314866,
      "grad_norm": 2.222666025161743,
      "learning_rate": 0.00010825793454723325,
      "loss": 2.7793,
      "step": 50
    },
    {
      "epoch": 0.007386888273314866,
      "eval_loss": 2.7024636268615723,
      "eval_runtime": 245.5234,
      "eval_samples_per_second": 11.608,
      "eval_steps_per_second": 5.804,
      "step": 50
    },
    {
      "epoch": 0.007534626038781163,
      "grad_norm": 0.41233178973197937,
      "learning_rate": 0.00010495837546732224,
      "loss": 2.283,
      "step": 51
    },
    {
      "epoch": 0.0076823638042474605,
      "grad_norm": 0.45200082659721375,
      "learning_rate": 0.00010165339447663587,
      "loss": 2.615,
      "step": 52
    },
    {
      "epoch": 0.007830101569713758,
      "grad_norm": 0.44340193271636963,
      "learning_rate": 9.834660552336415e-05,
      "loss": 2.2959,
      "step": 53
    },
    {
      "epoch": 0.007977839335180056,
      "grad_norm": 0.3993021547794342,
      "learning_rate": 9.504162453267777e-05,
      "loss": 2.5127,
      "step": 54
    },
    {
      "epoch": 0.008125577100646353,
      "grad_norm": 0.38962098956108093,
      "learning_rate": 9.174206545276677e-05,
      "loss": 2.6104,
      "step": 55
    },
    {
      "epoch": 0.00827331486611265,
      "grad_norm": 0.4015566408634186,
      "learning_rate": 8.845153630304139e-05,
      "loss": 2.3183,
      "step": 56
    },
    {
      "epoch": 0.008421052631578947,
      "grad_norm": 0.3686407804489136,
      "learning_rate": 8.517363522881579e-05,
      "loss": 2.4809,
      "step": 57
    },
    {
      "epoch": 0.008568790397045245,
      "grad_norm": 0.3890494704246521,
      "learning_rate": 8.191194656678904e-05,
      "loss": 2.7139,
      "step": 58
    },
    {
      "epoch": 0.008716528162511542,
      "grad_norm": 0.3961982727050781,
      "learning_rate": 7.867003692562534e-05,
      "loss": 2.6264,
      "step": 59
    },
    {
      "epoch": 0.00886426592797784,
      "grad_norm": 0.4162742495536804,
      "learning_rate": 7.54514512859201e-05,
      "loss": 2.4902,
      "step": 60
    },
    {
      "epoch": 0.009012003693444137,
      "grad_norm": 0.3902261257171631,
      "learning_rate": 7.225970912381556e-05,
      "loss": 2.5772,
      "step": 61
    },
    {
      "epoch": 0.009159741458910435,
      "grad_norm": 0.3940078616142273,
      "learning_rate": 6.909830056250527e-05,
      "loss": 2.6563,
      "step": 62
    },
    {
      "epoch": 0.009307479224376731,
      "grad_norm": 0.4395645558834076,
      "learning_rate": 6.59706825558357e-05,
      "loss": 2.6348,
      "step": 63
    },
    {
      "epoch": 0.00945521698984303,
      "grad_norm": 0.4575493335723877,
      "learning_rate": 6.28802751081779e-05,
      "loss": 2.5413,
      "step": 64
    },
    {
      "epoch": 0.009602954755309326,
      "grad_norm": 0.4489690065383911,
      "learning_rate": 5.983045753470308e-05,
      "loss": 2.4907,
      "step": 65
    },
    {
      "epoch": 0.009750692520775624,
      "grad_norm": 0.4150414764881134,
      "learning_rate": 5.6824564766150726e-05,
      "loss": 2.4324,
      "step": 66
    },
    {
      "epoch": 0.00989843028624192,
      "grad_norm": 0.46985292434692383,
      "learning_rate": 5.386588370213124e-05,
      "loss": 2.4636,
      "step": 67
    },
    {
      "epoch": 0.010046168051708219,
      "grad_norm": 0.4917268753051758,
      "learning_rate": 5.095764961694922e-05,
      "loss": 2.5561,
      "step": 68
    },
    {
      "epoch": 0.010193905817174515,
      "grad_norm": 0.5081900954246521,
      "learning_rate": 4.810304262187852e-05,
      "loss": 2.7343,
      "step": 69
    },
    {
      "epoch": 0.010341643582640813,
      "grad_norm": 0.4855806827545166,
      "learning_rate": 4.530518418775733e-05,
      "loss": 2.7617,
      "step": 70
    },
    {
      "epoch": 0.01048938134810711,
      "grad_norm": 0.5101497769355774,
      "learning_rate": 4.256713373170564e-05,
      "loss": 2.6342,
      "step": 71
    },
    {
      "epoch": 0.010637119113573408,
      "grad_norm": 0.5174307227134705,
      "learning_rate": 3.9891885271697496e-05,
      "loss": 2.7903,
      "step": 72
    },
    {
      "epoch": 0.010784856879039705,
      "grad_norm": 0.550068199634552,
      "learning_rate": 3.7282364152646297e-05,
      "loss": 2.6579,
      "step": 73
    },
    {
      "epoch": 0.010932594644506001,
      "grad_norm": 0.5740828514099121,
      "learning_rate": 3.4741423847583134e-05,
      "loss": 2.6477,
      "step": 74
    },
    {
      "epoch": 0.0110803324099723,
      "grad_norm": 0.5267476439476013,
      "learning_rate": 3.227184283742591e-05,
      "loss": 2.7395,
      "step": 75
    },
    {
      "epoch": 0.0110803324099723,
      "eval_loss": 2.689473867416382,
      "eval_runtime": 245.5378,
      "eval_samples_per_second": 11.607,
      "eval_steps_per_second": 5.804,
      "step": 75
    },
    {
      "epoch": 0.011228070175438596,
      "grad_norm": 0.5013121962547302,
      "learning_rate": 2.9876321572751144e-05,
      "loss": 2.6454,
      "step": 76
    },
    {
      "epoch": 0.011375807940904894,
      "grad_norm": 0.6422709226608276,
      "learning_rate": 2.7557479520891104e-05,
      "loss": 2.7858,
      "step": 77
    },
    {
      "epoch": 0.01152354570637119,
      "grad_norm": 0.5303881764411926,
      "learning_rate": 2.5317852301584643e-05,
      "loss": 2.5976,
      "step": 78
    },
    {
      "epoch": 0.011671283471837489,
      "grad_norm": 0.7301583290100098,
      "learning_rate": 2.315988891431412e-05,
      "loss": 2.5867,
      "step": 79
    },
    {
      "epoch": 0.011819021237303785,
      "grad_norm": 0.6291911005973816,
      "learning_rate": 2.1085949060360654e-05,
      "loss": 2.7627,
      "step": 80
    },
    {
      "epoch": 0.011966759002770083,
      "grad_norm": 0.5394213795661926,
      "learning_rate": 1.9098300562505266e-05,
      "loss": 2.6842,
      "step": 81
    },
    {
      "epoch": 0.01211449676823638,
      "grad_norm": 0.6252685189247131,
      "learning_rate": 1.7199116885197995e-05,
      "loss": 2.8988,
      "step": 82
    },
    {
      "epoch": 0.012262234533702678,
      "grad_norm": 0.607553243637085,
      "learning_rate": 1.5390474757906446e-05,
      "loss": 2.7113,
      "step": 83
    },
    {
      "epoch": 0.012409972299168974,
      "grad_norm": 0.6846975088119507,
      "learning_rate": 1.3674351904242611e-05,
      "loss": 2.6447,
      "step": 84
    },
    {
      "epoch": 0.012557710064635273,
      "grad_norm": 0.6939342021942139,
      "learning_rate": 1.2052624879351104e-05,
      "loss": 2.9223,
      "step": 85
    },
    {
      "epoch": 0.012705447830101569,
      "grad_norm": 0.6672337651252747,
      "learning_rate": 1.0527067017923654e-05,
      "loss": 2.8346,
      "step": 86
    },
    {
      "epoch": 0.012853185595567867,
      "grad_norm": 0.7558186650276184,
      "learning_rate": 9.09934649508375e-06,
      "loss": 2.8925,
      "step": 87
    },
    {
      "epoch": 0.013000923361034164,
      "grad_norm": 0.7285046577453613,
      "learning_rate": 7.771024502261526e-06,
      "loss": 2.75,
      "step": 88
    },
    {
      "epoch": 0.013148661126500462,
      "grad_norm": 0.8400636315345764,
      "learning_rate": 6.543553540053926e-06,
      "loss": 2.8976,
      "step": 89
    },
    {
      "epoch": 0.013296398891966758,
      "grad_norm": 0.7853424549102783,
      "learning_rate": 5.418275829936537e-06,
      "loss": 3.1096,
      "step": 90
    },
    {
      "epoch": 0.013444136657433057,
      "grad_norm": 0.7648143172264099,
      "learning_rate": 4.3964218465642355e-06,
      "loss": 2.9453,
      "step": 91
    },
    {
      "epoch": 0.013591874422899353,
      "grad_norm": 0.8376665711402893,
      "learning_rate": 3.4791089722651436e-06,
      "loss": 2.8444,
      "step": 92
    },
    {
      "epoch": 0.013739612188365651,
      "grad_norm": 0.8066268563270569,
      "learning_rate": 2.667340275199426e-06,
      "loss": 2.993,
      "step": 93
    },
    {
      "epoch": 0.013887349953831948,
      "grad_norm": 0.9552287459373474,
      "learning_rate": 1.9620034125190644e-06,
      "loss": 2.8803,
      "step": 94
    },
    {
      "epoch": 0.014035087719298246,
      "grad_norm": 0.8276549577713013,
      "learning_rate": 1.3638696597277679e-06,
      "loss": 2.8189,
      "step": 95
    },
    {
      "epoch": 0.014182825484764542,
      "grad_norm": 0.8577640056610107,
      "learning_rate": 8.735930673024806e-07,
      "loss": 3.1154,
      "step": 96
    },
    {
      "epoch": 0.01433056325023084,
      "grad_norm": 0.8612611889839172,
      "learning_rate": 4.917097454988584e-07,
      "loss": 2.4841,
      "step": 97
    },
    {
      "epoch": 0.014478301015697137,
      "grad_norm": 0.9725845456123352,
      "learning_rate": 2.1863727812254653e-07,
      "loss": 2.7285,
      "step": 98
    },
    {
      "epoch": 0.014626038781163435,
      "grad_norm": 1.032256841659546,
      "learning_rate": 5.467426590739511e-08,
      "loss": 3.125,
      "step": 99
    },
    {
      "epoch": 0.014773776546629732,
      "grad_norm": 1.573689341545105,
      "learning_rate": 0.0,
      "loss": 2.9277,
      "step": 100
    },
    {
      "epoch": 0.014773776546629732,
      "eval_loss": 2.6855108737945557,
      "eval_runtime": 245.6241,
      "eval_samples_per_second": 11.603,
      "eval_steps_per_second": 5.802,
      "step": 100
    }
  ],
  "logging_steps": 1,
  "max_steps": 100,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 2.5458351538176e+16,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}