{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.3999625503230035,
  "global_step": 17088,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
|
    {
      "epoch": 0.0,
      "learning_rate": 1.1682242990654204e-06,
      "loss": 10.9364,
      "theoretical_loss": 20.81281780154715,
      "tokens_seen": 65536
    },
|
    {
      "epoch": 0.0,
      "learning_rate": 5.841121495327103e-05,
      "loss": 8.9947,
      "theoretical_loss": 8.563482664611069,
      "tokens_seen": 3276800
    },
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 0.00011682242990654206, |
|
"loss": 7.0133, |
|
"theoretical_loss": 7.4777587180480305, |
|
"tokens_seen": 6553600 |
|
}, |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 0.00017523364485981307, |
|
"loss": 6.2244, |
|
"theoretical_loss": 6.9337544888949, |
|
"tokens_seen": 9830400 |
|
}, |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 0.00023364485981308412, |
|
"loss": 5.822, |
|
"theoretical_loss": 6.583566228426414, |
|
"tokens_seen": 13107200 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 0.00029205607476635517, |
|
"loss": 5.5642, |
|
"theoretical_loss": 6.330713565116083, |
|
"tokens_seen": 16384000 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 0.00035046728971962614, |
|
"loss": 5.3688, |
|
"theoretical_loss": 6.135529231940326, |
|
"tokens_seen": 19660800 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 0.0004088785046728972, |
|
"loss": 5.1987, |
|
"theoretical_loss": 5.978101583869607, |
|
"tokens_seen": 22937600 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 0.00046728971962616824, |
|
"loss": 5.1026, |
|
"theoretical_loss": 5.8471173262659235, |
|
"tokens_seen": 26214400 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 0.000499739928125591, |
|
"loss": 4.9312, |
|
"theoretical_loss": 5.7355768158821245, |
|
"tokens_seen": 29491200 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 0.0004991488556837526, |
|
"loss": 4.8598, |
|
"theoretical_loss": 5.638870144071353, |
|
"tokens_seen": 32768000 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 0.0004985577832419141, |
|
"loss": 4.7297, |
|
"theoretical_loss": 5.553812381844907, |
|
"tokens_seen": 36044800 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 0.0004979667108000757, |
|
"loss": 4.6466, |
|
"theoretical_loss": 5.478118080556438, |
|
"tokens_seen": 39321600 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 0.0004973756383582371, |
|
"loss": 4.5646, |
|
"theoretical_loss": 5.410095959579362, |
|
"tokens_seen": 42598400 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 0.0004967845659163987, |
|
"loss": 4.5162, |
|
"theoretical_loss": 5.348462083735834, |
|
"tokens_seen": 45875200 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 0.0004961934934745603, |
|
"loss": 4.3809, |
|
"theoretical_loss": 5.292220566937567, |
|
"tokens_seen": 49152000 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 0.0004956024210327218, |
|
"loss": 4.336, |
|
"theoretical_loss": 5.240584625769978, |
|
"tokens_seen": 52428800 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 0.0004950113485908833, |
|
"loss": 4.2829, |
|
"theoretical_loss": 5.192922724525789, |
|
"tokens_seen": 55705600 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 0.0004944202761490448, |
|
"loss": 4.209, |
|
"theoretical_loss": 5.1487208633564405, |
|
"tokens_seen": 58982400 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 0.0004938292037072064, |
|
"loss": 4.0751, |
|
"theoretical_loss": 5.107555562405102, |
|
"tokens_seen": 62259200 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 0.0004932381312653678, |
|
"loss": 3.9696, |
|
"theoretical_loss": 5.069074117143246, |
|
"tokens_seen": 65536000 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 0.0004926470588235294, |
|
"loss": 3.9197, |
|
"theoretical_loss": 5.032979909838007, |
|
"tokens_seen": 68812800 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 0.000492055986381691, |
|
"loss": 3.8378, |
|
"theoretical_loss": 4.999021308224664, |
|
"tokens_seen": 72089600 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 0.0004914649139398525, |
|
"loss": 3.7856, |
|
"theoretical_loss": 4.966983155351962, |
|
"tokens_seen": 75366400 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 0.000490873841498014, |
|
"loss": 3.6988, |
|
"theoretical_loss": 4.9366801616251355, |
|
"tokens_seen": 78643200 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 0.0004902827690561755, |
|
"loss": 3.6826, |
|
"theoretical_loss": 4.907951713830082, |
|
"tokens_seen": 81920000 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 0.0004896916966143371, |
|
"loss": 3.6271, |
|
"theoretical_loss": 4.880657753812926, |
|
"tokens_seen": 85196800 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 0.0004891006241724985, |
|
"loss": 3.617, |
|
"theoretical_loss": 4.854675474481779, |
|
"tokens_seen": 88473600 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 0.0004885095517306601, |
|
"loss": 3.528, |
|
"theoretical_loss": 4.8298966473088125, |
|
"tokens_seen": 91750400 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 0.0004879184792888217, |
|
"loss": 3.5513, |
|
"theoretical_loss": 4.8062254427779205, |
|
"tokens_seen": 95027200 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 0.0004873274068469832, |
|
"loss": 3.4962, |
|
"theoretical_loss": 4.783576639276257, |
|
"tokens_seen": 98304000 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 0.00048673633440514467, |
|
"loss": 3.5334, |
|
"theoretical_loss": 4.761874140772408, |
|
"tokens_seen": 101580800 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 0.0004861452619633062, |
|
"loss": 3.5329, |
|
"theoretical_loss": 4.741049741962473, |
|
"tokens_seen": 104857600 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 0.0004855541895214677, |
|
"loss": 3.5329, |
|
"theoretical_loss": 4.721042093249051, |
|
"tokens_seen": 108134400 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 0.0004849631170796293, |
|
"loss": 3.4926, |
|
"theoretical_loss": 4.701795828231866, |
|
"tokens_seen": 111411200 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 0.0004843720446377908, |
|
"loss": 3.4835, |
|
"theoretical_loss": 4.68326082423593, |
|
"tokens_seen": 114688000 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 0.00048378097219595233, |
|
"loss": 3.4587, |
|
"theoretical_loss": 4.665391572426282, |
|
"tokens_seen": 117964800 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 0.00048318989975411385, |
|
"loss": 3.4307, |
|
"theoretical_loss": 4.648146638719739, |
|
"tokens_seen": 121241600 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 0.00048259882731227537, |
|
"loss": 3.443, |
|
"theoretical_loss": 4.631488200339643, |
|
"tokens_seen": 124518400 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.0004820077548704369, |
|
"loss": 3.45, |
|
"theoretical_loss": 4.615381645715717, |
|
"tokens_seen": 127795200 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.00048141668242859847, |
|
"loss": 3.3857, |
|
"theoretical_loss": 4.599795227690505, |
|
"tokens_seen": 131072000 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.00048082560998676, |
|
"loss": 3.4065, |
|
"theoretical_loss": 4.584699761792674, |
|
"tokens_seen": 134348800 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.0004802345375449215, |
|
"loss": 3.3659, |
|
"theoretical_loss": 4.570068362778516, |
|
"tokens_seen": 137625600 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.00047964346510308303, |
|
"loss": 3.3781, |
|
"theoretical_loss": 4.555876213804037, |
|
"tokens_seen": 140902400 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.00047905239266124455, |
|
"loss": 3.3612, |
|
"theoretical_loss": 4.542100363530799, |
|
"tokens_seen": 144179200 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.00047846132021940607, |
|
"loss": 3.3402, |
|
"theoretical_loss": 4.528719547234816, |
|
"tokens_seen": 147456000 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.0004778702477775676, |
|
"loss": 3.2899, |
|
"theoretical_loss": 4.515714028614996, |
|
"tokens_seen": 150732800 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.00047727917533572917, |
|
"loss": 3.3099, |
|
"theoretical_loss": 4.503065459513339, |
|
"tokens_seen": 154009600 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.0004766881028938907, |
|
"loss": 3.3162, |
|
"theoretical_loss": 4.4907567551852665, |
|
"tokens_seen": 157286400 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.0004760970304520522, |
|
"loss": 3.3036, |
|
"theoretical_loss": 4.478771983111967, |
|
"tokens_seen": 160563200 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"objective/train/docs_used": 104000, |
|
"objective/train/instantaneous_batch_size": 32, |
|
"objective/train/instantaneous_microbatch_size": 32768, |
|
"objective/train/original_loss": 3.2747654914855957, |
|
"objective/train/theoretical_loss": 4.467096263641219, |
|
"objective/train/tokens_used": 184300000, |
|
"theoretical_loss": 4.467096263641219, |
|
"tokens_seen": 163840000 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.00047550595801021373, |
|
"loss": 3.2837, |
|
"theoretical_loss": 4.467096263641219, |
|
"tokens_seen": 163840000 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.00047491488556837525, |
|
"loss": 3.2955, |
|
"theoretical_loss": 4.455715680989545, |
|
"tokens_seen": 167116800 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.00047432381312653677, |
|
"loss": 3.2608, |
|
"theoretical_loss": 4.44461720334543, |
|
"tokens_seen": 170393600 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.00047373274068469835, |
|
"loss": 3.2026, |
|
"theoretical_loss": 4.433788610987646, |
|
"tokens_seen": 173670400 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.00047314166824285987, |
|
"loss": 3.2621, |
|
"theoretical_loss": 4.42321843148016, |
|
"tokens_seen": 176947200 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.0004725505958010214, |
|
"loss": 3.1999, |
|
"theoretical_loss": 4.412895881130142, |
|
"tokens_seen": 180224000 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.0004719595233591829, |
|
"loss": 3.2384, |
|
"theoretical_loss": 4.4028108120020795, |
|
"tokens_seen": 183500800 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.00047136845091734443, |
|
"loss": 3.244, |
|
"theoretical_loss": 4.392953663871862, |
|
"tokens_seen": 186777600 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.00047077737847550595, |
|
"loss": 3.2329, |
|
"theoretical_loss": 4.383315420582533, |
|
"tokens_seen": 190054400 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.0004701863060336675, |
|
"loss": 3.2634, |
|
"theoretical_loss": 4.373887570330275, |
|
"tokens_seen": 193331200 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.00046959523359182905, |
|
"loss": 3.2143, |
|
"theoretical_loss": 4.364662069466704, |
|
"tokens_seen": 196608000 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.00046900416114999057, |
|
"loss": 3.2128, |
|
"theoretical_loss": 4.355631309453283, |
|
"tokens_seen": 199884800 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.0004684130887081521, |
|
"loss": 3.1675, |
|
"theoretical_loss": 4.346788086646671, |
|
"tokens_seen": 203161600 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.0004678220162663136, |
|
"loss": 3.1967, |
|
"theoretical_loss": 4.33812557463116, |
|
"tokens_seen": 206438400 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.00046723094382447513, |
|
"loss": 3.2042, |
|
"theoretical_loss": 4.329637298846812, |
|
"tokens_seen": 209715200 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.00046663987138263665, |
|
"loss": 3.1574, |
|
"theoretical_loss": 4.321317113290252, |
|
"tokens_seen": 212992000 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.0004660487989407982, |
|
"loss": 3.1317, |
|
"theoretical_loss": 4.3131591790897925, |
|
"tokens_seen": 216268800 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.00046545772649895975, |
|
"loss": 3.1829, |
|
"theoretical_loss": 4.305157944778228, |
|
"tokens_seen": 219545600 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.00046486665405712127, |
|
"loss": 3.2073, |
|
"theoretical_loss": 4.297308128105687, |
|
"tokens_seen": 222822400 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.0004642755816152828, |
|
"loss": 3.1994, |
|
"theoretical_loss": 4.2896046992515995, |
|
"tokens_seen": 226099200 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.0004636845091734443, |
|
"loss": 3.2312, |
|
"theoretical_loss": 4.282042865309616, |
|
"tokens_seen": 229376000 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.00046309343673160583, |
|
"loss": 3.2006, |
|
"theoretical_loss": 4.274618055932298, |
|
"tokens_seen": 232652800 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.0004625023642897674, |
|
"loss": 3.1756, |
|
"theoretical_loss": 4.267325910033897, |
|
"tokens_seen": 235929600 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.0004619112918479289, |
|
"loss": 3.1158, |
|
"theoretical_loss": 4.260162263459744, |
|
"tokens_seen": 239206400 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.00046132021940609044, |
|
"loss": 3.1448, |
|
"theoretical_loss": 4.253123137539814, |
|
"tokens_seen": 242483200 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.00046072914696425197, |
|
"loss": 3.166, |
|
"theoretical_loss": 4.246204728452055, |
|
"tokens_seen": 245760000 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.0004601380745224135, |
|
"loss": 3.1762, |
|
"theoretical_loss": 4.239403397328261, |
|
"tokens_seen": 249036800 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.000459547002080575, |
|
"loss": 3.1442, |
|
"theoretical_loss": 4.232715661041632, |
|
"tokens_seen": 252313600 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.0004589559296387366, |
|
"loss": 3.1504, |
|
"theoretical_loss": 4.226138183620867, |
|
"tokens_seen": 255590400 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.0004583648571968981, |
|
"loss": 3.1099, |
|
"theoretical_loss": 4.219667768240775, |
|
"tokens_seen": 258867200 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.0004577737847550596, |
|
"loss": 3.114, |
|
"theoretical_loss": 4.213301349743924, |
|
"tokens_seen": 262144000 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.0004571827123132211, |
|
"loss": 3.0703, |
|
"theoretical_loss": 4.20703598765197, |
|
"tokens_seen": 265420800 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.0004565916398713826, |
|
"loss": 3.0383, |
|
"theoretical_loss": 4.2008688596290025, |
|
"tokens_seen": 268697600 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.00045600056742954413, |
|
"loss": 2.9881, |
|
"theoretical_loss": 4.194797255362549, |
|
"tokens_seen": 271974400 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.0004554094949877057, |
|
"loss": 3.068, |
|
"theoretical_loss": 4.188818570830883, |
|
"tokens_seen": 275251200 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.00045481842254586723, |
|
"loss": 3.0823, |
|
"theoretical_loss": 4.182930302927963, |
|
"tokens_seen": 278528000 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.00045422735010402875, |
|
"loss": 3.0393, |
|
"theoretical_loss": 4.17713004441978, |
|
"tokens_seen": 281804800 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.00045363627766219027, |
|
"loss": 3.0775, |
|
"theoretical_loss": 4.1714154792080915, |
|
"tokens_seen": 285081600 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.0004530452052203518, |
|
"loss": 3.0876, |
|
"theoretical_loss": 4.165784377879517, |
|
"tokens_seen": 288358400 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.0004524541327785133, |
|
"loss": 3.0473, |
|
"theoretical_loss": 4.160234593519768, |
|
"tokens_seen": 291635200 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 0.00045186306033667483, |
|
"loss": 3.0706, |
|
"theoretical_loss": 4.15476405777444, |
|
"tokens_seen": 294912000 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 0.0004512719878948364, |
|
"loss": 3.0424, |
|
"theoretical_loss": 4.149370777139286, |
|
"tokens_seen": 298188800 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 0.00045068091545299793, |
|
"loss": 3.04, |
|
"theoretical_loss": 4.144052829464249, |
|
"tokens_seen": 301465600 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 0.00045008984301115945, |
|
"loss": 3.0118, |
|
"theoretical_loss": 4.138808360656742, |
|
"tokens_seen": 304742400 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 0.00044949877056932097, |
|
"loss": 3.0093, |
|
"theoretical_loss": 4.133635581570836, |
|
"tokens_seen": 308019200 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 0.0004489076981274825, |
|
"loss": 2.9812, |
|
"theoretical_loss": 4.128532765070004, |
|
"tokens_seen": 311296000 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 0.000448316625685644, |
|
"loss": 3.0051, |
|
"theoretical_loss": 4.123498243252032, |
|
"tokens_seen": 314572800 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 0.0004477255532438056, |
|
"loss": 2.9872, |
|
"theoretical_loss": 4.118530404825556, |
|
"tokens_seen": 317849600 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 0.0004471344808019671, |
|
"loss": 3.0065, |
|
"theoretical_loss": 4.113627692628464, |
|
"tokens_seen": 321126400 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 0.00044654340836012863, |
|
"loss": 3.0122, |
|
"theoretical_loss": 4.108788601279149, |
|
"tokens_seen": 324403200 |
|
}, |
|
{ |
|
"debugging/Self-BLEU-5": 0.5365128506817183, |
|
"debugging/distinct-1-grams": 0.7612814402327299, |
|
"debugging/distinct-2-grams": 0.9694583753853511, |
|
"debugging/entropy-1-grams": 6.003629944255698, |
|
"debugging/entropy-2-grams": 7.054987089269872, |
|
"debugging/length": 495.25, |
|
"debugging/num_segments": 16, |
|
"epoch": 0.12, |
|
"objective/train/docs_used": 197327, |
|
"objective/train/instantaneous_batch_size": 32, |
|
"objective/train/instantaneous_microbatch_size": 32768, |
|
"objective/train/original_loss": 2.9225306510925293, |
|
"objective/train/theoretical_loss": 4.10401167495222, |
|
"objective/train/tokens_used": 348140000, |
|
"theoretical_loss": 4.10401167495222, |
|
"tokens_seen": 327680000 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 0.00044595233591829015, |
|
"loss": 3.0423, |
|
"theoretical_loss": 4.10401167495222, |
|
"tokens_seen": 327680000 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 0.00044536126347645167, |
|
"loss": 3.041, |
|
"theoretical_loss": 4.099295505270921, |
|
"tokens_seen": 330956800 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 0.0004447701910346132, |
|
"loss": 2.9867, |
|
"theoretical_loss": 4.094638729309031, |
|
"tokens_seen": 334233600 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 0.00044417911859277476, |
|
"loss": 2.9992, |
|
"theoretical_loss": 4.090040027695556, |
|
"tokens_seen": 337510400 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 0.0004435880461509363, |
|
"loss": 2.9584, |
|
"theoretical_loss": 4.085498122815992, |
|
"tokens_seen": 340787200 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 0.0004429969737090978, |
|
"loss": 2.9834, |
|
"theoretical_loss": 4.081011777104333, |
|
"tokens_seen": 344064000 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 0.0004424059012672593, |
|
"loss": 2.9805, |
|
"theoretical_loss": 4.076579791420469, |
|
"tokens_seen": 347340800 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 0.00044181482882542085, |
|
"loss": 2.9998, |
|
"theoretical_loss": 4.0722010035079155, |
|
"tokens_seen": 350617600 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 0.00044122375638358237, |
|
"loss": 2.9906, |
|
"theoretical_loss": 4.067874286527197, |
|
"tokens_seen": 353894400 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 0.0004406326839417439, |
|
"loss": 2.9923, |
|
"theoretical_loss": 4.063598547660519, |
|
"tokens_seen": 357171200 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 0.00044004161149990546, |
|
"loss": 3.0199, |
|
"theoretical_loss": 4.05937272678363, |
|
"tokens_seen": 360448000 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 0.000439450539058067, |
|
"loss": 3.0079, |
|
"theoretical_loss": 4.055195795201069, |
|
"tokens_seen": 363724800 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 0.0004388594666162285, |
|
"loss": 2.9757, |
|
"theoretical_loss": 4.051066754441235, |
|
"tokens_seen": 367001600 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 0.00043826839417439, |
|
"loss": 2.9698, |
|
"theoretical_loss": 4.04698463510794, |
|
"tokens_seen": 370278400 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 0.00043767732173255155, |
|
"loss": 2.9735, |
|
"theoretical_loss": 4.042948495785312, |
|
"tokens_seen": 373555200 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 0.00043708624929071307, |
|
"loss": 2.9327, |
|
"theoretical_loss": 4.038957421993153, |
|
"tokens_seen": 376832000 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 0.00043649517684887464, |
|
"loss": 2.9425, |
|
"theoretical_loss": 4.035010525189982, |
|
"tokens_seen": 380108800 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 0.00043590410440703616, |
|
"loss": 2.9746, |
|
"theoretical_loss": 4.031106941821218, |
|
"tokens_seen": 383385600 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 0.0004353130319651977, |
|
"loss": 2.9371, |
|
"theoretical_loss": 4.027245832410079, |
|
"tokens_seen": 386662400 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 0.0004347219595233592, |
|
"loss": 2.9136, |
|
"theoretical_loss": 4.023426380688943, |
|
"tokens_seen": 389939200 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 0.0004341308870815207, |
|
"loss": 2.9586, |
|
"theoretical_loss": 4.019647792769048, |
|
"tokens_seen": 393216000 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 0.00043353981463968225, |
|
"loss": 3.0316, |
|
"theoretical_loss": 4.015909296346521, |
|
"tokens_seen": 396492800 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 0.0004329487421978438, |
|
"loss": 2.974, |
|
"theoretical_loss": 4.012210139942894, |
|
"tokens_seen": 399769600 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 0.00043235766975600534, |
|
"loss": 3.0154, |
|
"theoretical_loss": 4.008549592178291, |
|
"tokens_seen": 403046400 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 0.00043176659731416686, |
|
"loss": 2.9866, |
|
"theoretical_loss": 4.004926941075674, |
|
"tokens_seen": 406323200 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 0.0004311755248723284, |
|
"loss": 3.0274, |
|
"theoretical_loss": 4.001341493394558, |
|
"tokens_seen": 409600000 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 0.0004305844524304899, |
|
"loss": 3.0314, |
|
"theoretical_loss": 3.997792573992726, |
|
"tokens_seen": 412876800 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 0.0004299933799886514, |
|
"loss": 2.9786, |
|
"theoretical_loss": 3.994279525214554, |
|
"tokens_seen": 416153600 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 0.00042940230754681295, |
|
"loss": 3.0252, |
|
"theoretical_loss": 3.990801706304647, |
|
"tokens_seen": 419430400 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 0.0004288112351049745, |
|
"loss": 2.9866, |
|
"theoretical_loss": 3.987358492845532, |
|
"tokens_seen": 422707200 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 0.000428220162663136, |
|
"loss": 3.0081, |
|
"theoretical_loss": 3.9839492762182647, |
|
"tokens_seen": 425984000 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 0.0004276290902212975, |
|
"loss": 2.9837, |
|
"theoretical_loss": 3.9805734630848306, |
|
"tokens_seen": 429260800 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 0.00042703801777945903, |
|
"loss": 2.9656, |
|
"theoretical_loss": 3.9772304748913054, |
|
"tokens_seen": 432537600 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 0.00042644694533762055, |
|
"loss": 2.9527, |
|
"theoretical_loss": 3.973919747390801, |
|
"tokens_seen": 435814400 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 0.00042585587289578207, |
|
"loss": 2.9704, |
|
"theoretical_loss": 3.9706407301852487, |
|
"tokens_seen": 439091200 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 0.00042526480045394365, |
|
"loss": 2.9481, |
|
"theoretical_loss": 3.9673928862851655, |
|
"tokens_seen": 442368000 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 0.00042467372801210517, |
|
"loss": 2.9408, |
|
"theoretical_loss": 3.9641756916865463, |
|
"tokens_seen": 445644800 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 0.0004240826555702667, |
|
"loss": 2.9444, |
|
"theoretical_loss": 3.960988634964113, |
|
"tokens_seen": 448921600 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 0.0004234915831284282, |
|
"loss": 2.9224, |
|
"theoretical_loss": 3.9578312168801597, |
|
"tokens_seen": 452198400 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 0.00042290051068658973, |
|
"loss": 2.9187, |
|
"theoretical_loss": 3.954702950008308, |
|
"tokens_seen": 455475200 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 0.00042230943824475125, |
|
"loss": 2.9027, |
|
"theoretical_loss": 3.9516033583714734, |
|
"tokens_seen": 458752000 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 0.0004217183658029128, |
|
"loss": 2.9277, |
|
"theoretical_loss": 3.9485319770934355, |
|
"tokens_seen": 462028800 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 0.00042112729336107435, |
|
"loss": 2.9109, |
|
"theoretical_loss": 3.945488352063391, |
|
"tokens_seen": 465305600 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 0.00042053622091923587, |
|
"loss": 2.8861, |
|
"theoretical_loss": 3.942472039612926, |
|
"tokens_seen": 468582400 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 0.0004199451484773974, |
|
"loss": 2.8901, |
|
"theoretical_loss": 3.939482606204863, |
|
"tokens_seen": 471859200 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 0.0004193540760355589, |
|
"loss": 2.9065, |
|
"theoretical_loss": 3.936519628133466, |
|
"tokens_seen": 475136000 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 0.00041876300359372043, |
|
"loss": 2.9422, |
|
"theoretical_loss": 3.9335826912355114, |
|
"tokens_seen": 478412800 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 0.00041817193115188195, |
|
"loss": 2.9805, |
|
"theoretical_loss": 3.93067139061177, |
|
"tokens_seen": 481689600 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 0.0004175808587100435, |
|
"loss": 2.9047, |
|
"theoretical_loss": 3.927785330358441, |
|
"tokens_seen": 484966400 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 0.00041698978626820505, |
|
"loss": 2.8916, |
|
"theoretical_loss": 3.9249241233081333, |
|
"tokens_seen": 488243200 |
|
}, |
|
{ |
|
"epoch": 0.18, |
|
"objective/train/docs_used": 287192, |
|
"objective/train/instantaneous_batch_size": 32, |
|
"objective/train/instantaneous_microbatch_size": 32768, |
|
"objective/train/original_loss": 3.0162453651428223, |
|
"objective/train/theoretical_loss": 3.92208739077998, |
|
"objective/train/tokens_used": 511980000, |
|
"theoretical_loss": 3.92208739077998, |
|
"tokens_seen": 491520000 |
|
}, |
|
{ |
|
"epoch": 0.18, |
|
"learning_rate": 0.00041639871382636657, |
|
"loss": 2.8641, |
|
"theoretical_loss": 3.92208739077998, |
|
"tokens_seen": 491520000 |
|
}, |
|
{ |
|
"epoch": 0.18, |
|
"learning_rate": 0.0004158076413845281, |
|
"loss": 2.8749, |
|
"theoretical_loss": 3.919274762338519, |
|
"tokens_seen": 494796800 |
|
}, |
|
{ |
|
"epoch": 0.18, |
|
"learning_rate": 0.0004152165689426896, |
|
"loss": 2.8998, |
|
"theoretical_loss": 3.9164858755609613, |
|
"tokens_seen": 498073600 |
|
}, |
|
{ |
|
"epoch": 0.18, |
|
"learning_rate": 0.00041462549650085113, |
|
"loss": 2.8889, |
|
"theoretical_loss": 3.9137203758125176, |
|
"tokens_seen": 501350400 |
|
}, |
|
{ |
|
"epoch": 0.18, |
|
"learning_rate": 0.0004140344240590127, |
|
"loss": 2.8474, |
|
"theoretical_loss": 3.910977916029439, |
|
"tokens_seen": 504627200 |
|
}, |
|
{ |
|
"epoch": 0.18, |
|
"learning_rate": 0.0004134433516171742, |
|
"loss": 2.8623, |
|
"theoretical_loss": 3.908258156509472, |
|
"tokens_seen": 507904000 |
|
}, |
|
{ |
|
"epoch": 0.18, |
|
"learning_rate": 0.00041285227917533575, |
|
"loss": 2.85, |
|
"theoretical_loss": 3.905560764709417, |
|
"tokens_seen": 511180800 |
|
}, |
|
{ |
|
"epoch": 0.18, |
|
"learning_rate": 0.00041226120673349727, |
|
"loss": 2.8285, |
|
"theoretical_loss": 3.9028854150495143, |
|
"tokens_seen": 514457600 |
|
}, |
|
{ |
|
"epoch": 0.18, |
|
"learning_rate": 0.0004116701342916588, |
|
"loss": 2.8533, |
|
"theoretical_loss": 3.9002317887243834, |
|
"tokens_seen": 517734400 |
|
}, |
|
{ |
|
"epoch": 0.19, |
|
"learning_rate": 0.0004110790618498203, |
|
"loss": 2.8568, |
|
"theoretical_loss": 3.897599573520247, |
|
"tokens_seen": 521011200 |
|
}, |
|
{ |
|
"epoch": 0.19, |
|
"learning_rate": 0.0004104879894079819, |
|
"loss": 2.8888, |
|
"theoretical_loss": 3.8949884636382106, |
|
"tokens_seen": 524288000 |
|
}, |
|
{ |
|
"epoch": 0.19, |
|
"learning_rate": 0.0004098969169661434, |
|
"loss": 2.8691, |
|
"theoretical_loss": 3.892398159523345, |
|
"tokens_seen": 527564800 |
|
}, |
|
{ |
|
"epoch": 0.19, |
|
"learning_rate": 0.0004093058445243049, |
|
"loss": 2.9266, |
|
"theoretical_loss": 3.889828367699349, |
|
"tokens_seen": 530841600 |
|
}, |
|
{ |
|
"epoch": 0.19, |
|
"learning_rate": 0.00040871477208246645, |
|
"loss": 2.8666, |
|
"theoretical_loss": 3.8872788006085894, |
|
"tokens_seen": 534118400 |
|
}, |
|
{ |
|
"epoch": 0.19, |
|
"learning_rate": 0.00040812369964062797, |
|
"loss": 2.8944, |
|
"theoretical_loss": 3.8847491764572926, |
|
"tokens_seen": 537395200 |
|
}, |
|
{ |
|
"epoch": 0.19, |
|
"learning_rate": 0.0004075326271987895, |
|
"loss": 2.8746, |
|
"theoretical_loss": 3.882239219065708, |
|
"tokens_seen": 540672000 |
|
}, |
|
{ |
|
"epoch": 0.19, |
|
"learning_rate": 0.000406941554756951, |
|
"loss": 2.9009, |
|
"theoretical_loss": 3.879748657723039, |
|
"tokens_seen": 543948800 |
|
}, |
|
{ |
|
"epoch": 0.2, |
|
"learning_rate": 0.0004063504823151126, |
|
"loss": 2.9279, |
|
"theoretical_loss": 3.8772772270469824, |
|
"tokens_seen": 547225600 |
|
}, |
|
{ |
|
"epoch": 0.2, |
|
"learning_rate": 0.0004057594098732741, |
|
"loss": 2.8889, |
|
"theoretical_loss": 3.8748246668476827, |
|
"tokens_seen": 550502400 |
|
}, |
|
{ |
|
"epoch": 0.2, |
|
"learning_rate": 0.0004051683374314356, |
|
"loss": 2.9038, |
|
"theoretical_loss": 3.8723907219959486, |
|
"tokens_seen": 553779200 |
|
}, |
|
{ |
|
"epoch": 0.2, |
|
"learning_rate": 0.00040457726498959715, |
|
"loss": 2.9203, |
|
"theoretical_loss": 3.869975142295573, |
|
"tokens_seen": 557056000 |
|
}, |
|
{ |
|
"epoch": 0.2, |
|
"learning_rate": 0.00040398619254775867, |
|
"loss": 2.9695, |
|
"theoretical_loss": 3.8675776823595998, |
|
"tokens_seen": 560332800 |
|
}, |
|
{ |
|
"epoch": 0.2, |
|
"learning_rate": 0.00040340694155475694, |
|
"loss": 2.9455, |
|
"theoretical_loss": 3.8651981014904027, |
|
"tokens_seen": 563609600 |
|
}, |
|
{ |
|
"epoch": 0.2, |
|
"learning_rate": 0.0004028158691129185, |
|
"loss": 2.9359, |
|
"theoretical_loss": 3.8628361635634265, |
|
"tokens_seen": 566886400 |
|
}, |
|
{ |
|
"epoch": 0.2, |
|
"learning_rate": 0.00040222479667108003, |
|
"loss": 2.9886, |
|
"theoretical_loss": 3.8604916369144666, |
|
"tokens_seen": 570163200 |
|
}, |
|
{ |
|
"epoch": 0.2, |
|
"learning_rate": 0.00040163372422924155, |
|
"loss": 2.9385, |
|
"theoretical_loss": 3.858164294230354, |
|
"tokens_seen": 573440000 |
|
}, |
|
{ |
|
"epoch": 0.21, |
|
"learning_rate": 0.0004010426517874031, |
|
"loss": 2.9009, |
|
"theoretical_loss": 3.85585391244293, |
|
"tokens_seen": 576716800 |
|
}, |
|
{ |
|
"epoch": 0.21, |
|
"learning_rate": 0.0004004515793455646, |
|
"loss": 2.8993, |
|
"theoretical_loss": 3.8535602726261864, |
|
"tokens_seen": 579993600 |
|
}, |
|
{ |
|
"epoch": 0.21, |
|
"learning_rate": 0.0003998605069037261, |
|
"loss": 2.9056, |
|
"theoretical_loss": 3.851283159896468, |
|
"tokens_seen": 583270400 |
|
}, |
|
{ |
|
"epoch": 0.21, |
|
"learning_rate": 0.0003992694344618877, |
|
"loss": 2.8781, |
|
"theoretical_loss": 3.8490223633156173, |
|
"tokens_seen": 586547200 |
|
}, |
|
{ |
|
"epoch": 0.21, |
|
"learning_rate": 0.0003986783620200492, |
|
"loss": 2.8614, |
|
"theoretical_loss": 3.846777675796974, |
|
"tokens_seen": 589824000 |
|
}, |
|
{ |
|
"epoch": 0.21, |
|
"learning_rate": 0.00039808728957821073, |
|
"loss": 2.8569, |
|
"theoretical_loss": 3.844548894014116, |
|
"tokens_seen": 593100800 |
|
}, |
|
{ |
|
"epoch": 0.21, |
|
"learning_rate": 0.00039749621713637225, |
|
"loss": 2.8681, |
|
"theoretical_loss": 3.8423358183122582, |
|
"tokens_seen": 596377600 |
|
}, |
|
{ |
|
"epoch": 0.21, |
|
"learning_rate": 0.0003969051446945338, |
|
"loss": 2.8386, |
|
"theoretical_loss": 3.840138252622208, |
|
"tokens_seen": 599654400 |
|
}, |
|
{ |
|
"epoch": 0.22, |
|
"learning_rate": 0.0003963140722526953, |
|
"loss": 2.877, |
|
"theoretical_loss": 3.837956004376799, |
|
"tokens_seen": 602931200 |
|
}, |
|
{ |
|
"epoch": 0.22, |
|
"learning_rate": 0.00039572299981085687, |
|
"loss": 2.8372, |
|
"theoretical_loss": 3.8357888844297094, |
|
"tokens_seen": 606208000 |
|
}, |
|
{ |
|
"epoch": 0.22, |
|
"learning_rate": 0.0003951319273690184, |
|
"loss": 2.8638, |
|
"theoretical_loss": 3.8336367069765958, |
|
"tokens_seen": 609484800 |
|
}, |
|
{ |
|
"epoch": 0.22, |
|
"learning_rate": 0.0003945408549271799, |
|
"loss": 2.8071, |
|
"theoretical_loss": 3.8314992894784536, |
|
"tokens_seen": 612761600 |
|
}, |
|
{ |
|
"epoch": 0.22, |
|
"learning_rate": 0.00039394978248534143, |
|
"loss": 2.8512, |
|
"theoretical_loss": 3.829376452587134, |
|
"tokens_seen": 616038400 |
|
}, |
|
{ |
|
"epoch": 0.22, |
|
"learning_rate": 0.00039335871004350295, |
|
"loss": 2.8688, |
|
"theoretical_loss": 3.827268020072948, |
|
"tokens_seen": 619315200 |
|
}, |
|
{ |
|
"epoch": 0.22, |
|
"learning_rate": 0.0003927676376016645, |
|
"loss": 2.8993, |
|
"theoretical_loss": 3.8251738187542843, |
|
"tokens_seen": 622592000 |
|
}, |
|
{ |
|
"epoch": 0.22, |
|
"learning_rate": 0.000392176565159826, |
|
"loss": 2.8856, |
|
"theoretical_loss": 3.8230936784291787, |
|
"tokens_seen": 625868800 |
|
}, |
|
{ |
|
"epoch": 0.22, |
|
"learning_rate": 0.00039158549271798757, |
|
"loss": 2.918, |
|
"theoretical_loss": 3.8210274318087656, |
|
"tokens_seen": 629145600 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"learning_rate": 0.0003909944202761491, |
|
"loss": 2.8609, |
|
"theoretical_loss": 3.818974914452557, |
|
"tokens_seen": 632422400 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"learning_rate": 0.00039040334783431056, |
|
"loss": 2.8562, |
|
"theoretical_loss": 3.8169359647054835, |
|
"tokens_seen": 635699200 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"learning_rate": 0.0003898122753924721, |
|
"loss": 2.8437, |
|
"theoretical_loss": 3.8149104236366433, |
|
"tokens_seen": 638976000 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"learning_rate": 0.0003892212029506336, |
|
"loss": 2.8228, |
|
"theoretical_loss": 3.8128981349797098, |
|
"tokens_seen": 642252800 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"learning_rate": 0.0003886301305087951, |
|
"loss": 2.8529, |
|
"theoretical_loss": 3.8108989450749293, |
|
"tokens_seen": 645529600 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"learning_rate": 0.0003880390580669567, |
|
"loss": 2.8384, |
|
"theoretical_loss": 3.8089127028126764, |
|
"tokens_seen": 648806400 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"learning_rate": 0.0003874479856251182, |
|
"loss": 2.8938, |
|
"theoretical_loss": 3.8069392595785083, |
|
"tokens_seen": 652083200 |
|
}, |
|
{ |
|
"debugging/Self-BLEU-5": 0.5265375629586004, |
|
"debugging/distinct-1-grams": 0.7435820408094715, |
|
"debugging/distinct-2-grams": 0.9558103821233092, |
|
"debugging/entropy-1-grams": 5.931434510687563, |
|
"debugging/entropy-2-grams": 6.886416755326388, |
|
"debugging/length": 521.9230769230769, |
|
"debugging/num_segments": 13, |
|
"epoch": 0.23, |
|
"objective/train/docs_used": 379091, |
|
"objective/train/instantaneous_batch_size": 32, |
|
"objective/train/instantaneous_microbatch_size": 32768, |
|
"objective/train/original_loss": 3.0659255981445312, |
|
"objective/train/theoretical_loss": 3.804978469199669, |
|
"objective/train/tokens_used": 675820000, |
|
"theoretical_loss": 3.804978469199669, |
|
"tokens_seen": 655360000 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"learning_rate": 0.00038685691318327974, |
|
"loss": 2.881, |
|
"theoretical_loss": 3.804978469199669, |
|
"tokens_seen": 655360000 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"learning_rate": 0.00038626584074144126, |
|
"loss": 2.888, |
|
"theoretical_loss": 3.803030187893005, |
|
"tokens_seen": 658636800 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"learning_rate": 0.0003856747682996028, |
|
"loss": 2.8765, |
|
"theoretical_loss": 3.8010942742142415, |
|
"tokens_seen": 661913600 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"learning_rate": 0.0003850836958577643, |
|
"loss": 2.9087, |
|
"theoretical_loss": 3.799170589008585, |
|
"tokens_seen": 665190400 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"learning_rate": 0.0003844926234159259, |
|
"loss": 2.8934, |
|
"theoretical_loss": 3.7972589953626006, |
|
"tokens_seen": 668467200 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"learning_rate": 0.0003839015509740874, |
|
"loss": 2.9056, |
|
"theoretical_loss": 3.795359358557337, |
|
"tokens_seen": 671744000 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"learning_rate": 0.0003833104785322489, |
|
"loss": 2.8333, |
|
"theoretical_loss": 3.79347154602265, |
|
"tokens_seen": 675020800 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"learning_rate": 0.00038271940609041044, |
|
"loss": 2.8867, |
|
"theoretical_loss": 3.7915954272926955, |
|
"tokens_seen": 678297600 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"learning_rate": 0.00038212833364857196, |
|
"loss": 2.8426, |
|
"theoretical_loss": 3.789730873962557, |
|
"tokens_seen": 681574400 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"learning_rate": 0.0003815372612067335, |
|
"loss": 2.788, |
|
"theoretical_loss": 3.787877759645963, |
|
"tokens_seen": 684851200 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"learning_rate": 0.000380946188764895, |
|
"loss": 2.8154, |
|
"theoretical_loss": 3.7860359599340776, |
|
"tokens_seen": 688128000 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"learning_rate": 0.0003803551163230566, |
|
"loss": 2.8512, |
|
"theoretical_loss": 3.784205352355321, |
|
"tokens_seen": 691404800 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"learning_rate": 0.0003797640438812181, |
|
"loss": 2.8795, |
|
"theoretical_loss": 3.782385816336189, |
|
"tokens_seen": 694681600 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"learning_rate": 0.0003791729714393796, |
|
"loss": 2.8301, |
|
"theoretical_loss": 3.7805772331630516, |
|
"tokens_seen": 697958400 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"learning_rate": 0.00037858189899754114, |
|
"loss": 2.8087, |
|
"theoretical_loss": 3.7787794859448898, |
|
"tokens_seen": 701235200 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"learning_rate": 0.00037799082655570266, |
|
"loss": 2.7913, |
|
"theoretical_loss": 3.7769924595769546, |
|
"tokens_seen": 704512000 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"learning_rate": 0.0003773997541138642, |
|
"loss": 2.788, |
|
"theoretical_loss": 3.7752160407053115, |
|
"tokens_seen": 707788800 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"learning_rate": 0.00037680868167202575, |
|
"loss": 2.7631, |
|
"theoretical_loss": 3.7734501176922493, |
|
"tokens_seen": 711065600 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"learning_rate": 0.0003762176092301873, |
|
"loss": 2.8143, |
|
"theoretical_loss": 3.7716945805825337, |
|
"tokens_seen": 714342400 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"learning_rate": 0.0003756265367883488, |
|
"loss": 2.8485, |
|
"theoretical_loss": 3.7699493210704667, |
|
"tokens_seen": 717619200 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"learning_rate": 0.0003750354643465103, |
|
"loss": 2.8257, |
|
"theoretical_loss": 3.7682142324677455, |
|
"tokens_seen": 720896000 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"learning_rate": 0.00037444439190467184, |
|
"loss": 2.8877, |
|
"theoretical_loss": 3.7664892096720886, |
|
"tokens_seen": 724172800 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"learning_rate": 0.00037385331946283336, |
|
"loss": 2.8216, |
|
"theoretical_loss": 3.7647741491366067, |
|
"tokens_seen": 727449600 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"learning_rate": 0.00037326224702099493, |
|
"loss": 2.8345, |
|
"theoretical_loss": 3.7630689488399027, |
|
"tokens_seen": 730726400 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"learning_rate": 0.00037267117457915645, |
|
"loss": 2.8008, |
|
"theoretical_loss": 3.7613735082568764, |
|
"tokens_seen": 734003200 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"learning_rate": 0.000372080102137318, |
|
"loss": 2.8302, |
|
"theoretical_loss": 3.759687728330217, |
|
"tokens_seen": 737280000 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"learning_rate": 0.0003714890296954795, |
|
"loss": 2.8284, |
|
"theoretical_loss": 3.75801151144256, |
|
"tokens_seen": 740556800 |
|
}, |
|
{ |
|
"epoch": 0.27, |
|
"learning_rate": 0.000370897957253641, |
|
"loss": 2.8069, |
|
"theoretical_loss": 3.756344761389295, |
|
"tokens_seen": 743833600 |
|
}, |
|
{ |
|
"epoch": 0.27, |
|
"learning_rate": 0.00037030688481180254, |
|
"loss": 2.7824, |
|
"theoretical_loss": 3.754687383352003, |
|
"tokens_seen": 747110400 |
|
}, |
|
{ |
|
"epoch": 0.27, |
|
"learning_rate": 0.00036971581236996406, |
|
"loss": 2.7891, |
|
"theoretical_loss": 3.7530392838725097, |
|
"tokens_seen": 750387200 |
|
}, |
|
{ |
|
"epoch": 0.27, |
|
"learning_rate": 0.00036912473992812563, |
|
"loss": 2.8107, |
|
"theoretical_loss": 3.751400370827529, |
|
"tokens_seen": 753664000 |
|
}, |
|
{ |
|
"epoch": 0.27, |
|
"learning_rate": 0.00036853366748628715, |
|
"loss": 2.8383, |
|
"theoretical_loss": 3.749770553403895, |
|
"tokens_seen": 756940800 |
|
}, |
|
{ |
|
"epoch": 0.27, |
|
"learning_rate": 0.0003679425950444487, |
|
"loss": 2.7993, |
|
"theoretical_loss": 3.748149742074355, |
|
"tokens_seen": 760217600 |
|
}, |
|
{ |
|
"epoch": 0.27, |
|
"learning_rate": 0.0003673515226026102, |
|
"loss": 2.8346, |
|
"theoretical_loss": 3.746537848573908, |
|
"tokens_seen": 763494400 |
|
}, |
|
{ |
|
"epoch": 0.27, |
|
"learning_rate": 0.0003667604501607717, |
|
"loss": 2.8492, |
|
"theoretical_loss": 3.744934785876686, |
|
"tokens_seen": 766771200 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"learning_rate": 0.00036616937771893324, |
|
"loss": 2.8489, |
|
"theoretical_loss": 3.7433404681733475, |
|
"tokens_seen": 770048000 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"learning_rate": 0.0003655783052770948, |
|
"loss": 2.8544, |
|
"theoretical_loss": 3.7417548108489846, |
|
"tokens_seen": 773324800 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"learning_rate": 0.00036498723283525633, |
|
"loss": 2.8264, |
|
"theoretical_loss": 3.740177730461517, |
|
"tokens_seen": 776601600 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"learning_rate": 0.00036439616039341785, |
|
"loss": 2.8743, |
|
"theoretical_loss": 3.73860914472057, |
|
"tokens_seen": 779878400 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"learning_rate": 0.0003638050879515794, |
|
"loss": 2.8407, |
|
"theoretical_loss": 3.7370489724668197, |
|
"tokens_seen": 783155200 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"learning_rate": 0.0003632140155097409, |
|
"loss": 2.8535, |
|
"theoretical_loss": 3.735497133651788, |
|
"tokens_seen": 786432000 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"learning_rate": 0.0003626229430679024, |
|
"loss": 2.836, |
|
"theoretical_loss": 3.733953549318091, |
|
"tokens_seen": 789708800 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"learning_rate": 0.000362031870626064, |
|
"loss": 2.8547, |
|
"theoretical_loss": 3.7324181415801094, |
|
"tokens_seen": 792985600 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"learning_rate": 0.00036144079818422546, |
|
"loss": 2.8486, |
|
"theoretical_loss": 3.7308908336050814, |
|
"tokens_seen": 796262400 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"learning_rate": 0.000360849725742387, |
|
"loss": 2.8293, |
|
"theoretical_loss": 3.729371549594614, |
|
"tokens_seen": 799539200 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"learning_rate": 0.0003602586533005485, |
|
"loss": 2.8396, |
|
"theoretical_loss": 3.7278602147665776, |
|
"tokens_seen": 802816000 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"learning_rate": 0.00035966758085871, |
|
"loss": 2.8218, |
|
"theoretical_loss": 3.726356755337407, |
|
"tokens_seen": 806092800 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"learning_rate": 0.00035907650841687154, |
|
"loss": 2.8109, |
|
"theoretical_loss": 3.724861098504767, |
|
"tokens_seen": 809369600 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"learning_rate": 0.0003584972574238699, |
|
"loss": 2.7916, |
|
"theoretical_loss": 3.7233731724305974, |
|
"tokens_seen": 812646400 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"learning_rate": 0.00035790618498203144, |
|
"loss": 2.8143, |
|
"theoretical_loss": 3.7218929062245105, |
|
"tokens_seen": 815923200 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"objective/train/docs_used": 471128, |
|
"objective/train/instantaneous_batch_size": 32, |
|
"objective/train/instantaneous_microbatch_size": 32768, |
|
"objective/train/original_loss": 2.6718621253967285, |
|
"objective/train/theoretical_loss": 3.7204202299275475, |
|
"objective/train/tokens_used": 839660000, |
|
"theoretical_loss": 3.7204202299275475, |
|
"tokens_seen": 819200000 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"learning_rate": 0.00035731511254019296, |
|
"loss": 2.7586, |
|
"theoretical_loss": 3.7204202299275475, |
|
"tokens_seen": 819200000 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"learning_rate": 0.0003567240400983545, |
|
"loss": 2.78, |
|
"theoretical_loss": 3.7189550744962707, |
|
"tokens_seen": 822476800 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"learning_rate": 0.000356132967656516, |
|
"loss": 2.762, |
|
"theoretical_loss": 3.717497371787192, |
|
"tokens_seen": 825753600 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"learning_rate": 0.0003555418952146775, |
|
"loss": 2.7573, |
|
"theoretical_loss": 3.7160470545415274, |
|
"tokens_seen": 829030400 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"learning_rate": 0.00035495082277283904, |
|
"loss": 2.7581, |
|
"theoretical_loss": 3.714604056370267, |
|
"tokens_seen": 832307200 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"learning_rate": 0.0003543597503310006, |
|
"loss": 2.7728, |
|
"theoretical_loss": 3.713168311739558, |
|
"tokens_seen": 835584000 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"learning_rate": 0.00035376867788916214, |
|
"loss": 2.7538, |
|
"theoretical_loss": 3.7117397559563843, |
|
"tokens_seen": 838860800 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"learning_rate": 0.00035317760544732366, |
|
"loss": 2.7468, |
|
"theoretical_loss": 3.710318325154545, |
|
"tokens_seen": 842137600 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"learning_rate": 0.0003525865330054852, |
|
"loss": 2.7912, |
|
"theoretical_loss": 3.7089039562809223, |
|
"tokens_seen": 845414400 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"learning_rate": 0.00035199546056364665, |
|
"loss": 2.7636, |
|
"theoretical_loss": 3.7074965870820193, |
|
"tokens_seen": 848691200 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"learning_rate": 0.00035140438812180817, |
|
"loss": 2.7608, |
|
"theoretical_loss": 3.7060961560907857, |
|
"tokens_seen": 851968000 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"learning_rate": 0.00035081331567996974, |
|
"loss": 2.8238, |
|
"theoretical_loss": 3.7047026026137, |
|
"tokens_seen": 855244800 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"learning_rate": 0.00035022224323813127, |
|
"loss": 2.7831, |
|
"theoretical_loss": 3.7033158667181154, |
|
"tokens_seen": 858521600 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"learning_rate": 0.0003496311707962928, |
|
"loss": 2.8149, |
|
"theoretical_loss": 3.701935889219863, |
|
"tokens_seen": 861798400 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"learning_rate": 0.0003490400983544543, |
|
"loss": 2.7787, |
|
"theoretical_loss": 3.7005626116710966, |
|
"tokens_seen": 865075200 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"learning_rate": 0.00034844902591261583, |
|
"loss": 2.7875, |
|
"theoretical_loss": 3.69919597634839, |
|
"tokens_seen": 868352000 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"learning_rate": 0.00034785795347077735, |
|
"loss": 2.765, |
|
"theoretical_loss": 3.6978359262410603, |
|
"tokens_seen": 871628800 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"learning_rate": 0.0003472668810289389, |
|
"loss": 2.7931, |
|
"theoretical_loss": 3.6964824050397276, |
|
"tokens_seen": 874905600 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"learning_rate": 0.00034667580858710044, |
|
"loss": 2.7302, |
|
"theoretical_loss": 3.6951353571251015, |
|
"tokens_seen": 878182400 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"learning_rate": 0.00034608473614526196, |
|
"loss": 2.7389, |
|
"theoretical_loss": 3.693794727556988, |
|
"tokens_seen": 881459200 |
|
}, |
|
{ |
|
"epoch": 0.32, |
|
"learning_rate": 0.0003454936637034235, |
|
"loss": 2.7457, |
|
"theoretical_loss": 3.692460462063506, |
|
"tokens_seen": 884736000 |
|
}, |
|
{ |
|
"epoch": 0.32, |
|
"learning_rate": 0.000344902591261585, |
|
"loss": 2.8004, |
|
"theoretical_loss": 3.691132507030521, |
|
"tokens_seen": 888012800 |
|
}, |
|
{ |
|
"epoch": 0.32, |
|
"learning_rate": 0.00034431151881974653, |
|
"loss": 2.8096, |
|
"theoretical_loss": 3.6898108094912816, |
|
"tokens_seen": 891289600 |
|
}, |
|
{ |
|
"epoch": 0.32, |
|
"learning_rate": 0.00034372044637790805, |
|
"loss": 2.7667, |
|
"theoretical_loss": 3.6884953171162556, |
|
"tokens_seen": 894566400 |
|
}, |
|
{ |
|
"epoch": 0.32, |
|
"learning_rate": 0.0003431293739360696, |
|
"loss": 2.7718, |
|
"theoretical_loss": 3.6871859782031624, |
|
"tokens_seen": 897843200 |
|
}, |
|
{ |
|
"epoch": 0.32, |
|
"learning_rate": 0.00034253830149423114, |
|
"loss": 2.7894, |
|
"theoretical_loss": 3.685882741667202, |
|
"tokens_seen": 901120000 |
|
}, |
|
{ |
|
"epoch": 0.32, |
|
"learning_rate": 0.00034194722905239266, |
|
"loss": 2.7886, |
|
"theoretical_loss": 3.684585557031461, |
|
"tokens_seen": 904396800 |
|
}, |
|
{ |
|
"epoch": 0.32, |
|
"learning_rate": 0.0003413561566105542, |
|
"loss": 2.8096, |
|
"theoretical_loss": 3.6832943744175126, |
|
"tokens_seen": 907673600 |
|
}, |
|
{ |
|
"epoch": 0.33, |
|
"learning_rate": 0.0003407650841687157, |
|
"loss": 2.7731, |
|
"theoretical_loss": 3.682009144536188, |
|
"tokens_seen": 910950400 |
|
}, |
|
{ |
|
"epoch": 0.33, |
|
"learning_rate": 0.0003401740117268772, |
|
"loss": 2.7988, |
|
"theoretical_loss": 3.680729818678526, |
|
"tokens_seen": 914227200 |
|
}, |
|
{ |
|
"epoch": 0.33, |
|
"learning_rate": 0.0003395829392850388, |
|
"loss": 2.775, |
|
"theoretical_loss": 3.6794563487068936, |
|
"tokens_seen": 917504000 |
|
}, |
|
{ |
|
"epoch": 0.33, |
|
"learning_rate": 0.0003389918668432003, |
|
"loss": 2.7833, |
|
"theoretical_loss": 3.6781886870462692, |
|
"tokens_seen": 920780800 |
|
}, |
|
{ |
|
"epoch": 0.33, |
|
"learning_rate": 0.00033840079440136184, |
|
"loss": 2.8013, |
|
"theoretical_loss": 3.676926786675698, |
|
"tokens_seen": 924057600 |
|
}, |
|
{ |
|
"epoch": 0.33, |
|
"learning_rate": 0.00033780972195952336, |
|
"loss": 2.7383, |
|
"theoretical_loss": 3.6756706011198963, |
|
"tokens_seen": 927334400 |
|
}, |
|
{ |
|
"epoch": 0.33, |
|
"learning_rate": 0.0003372186495176849, |
|
"loss": 2.7058, |
|
"theoretical_loss": 3.6744200844410217, |
|
"tokens_seen": 930611200 |
|
}, |
|
{ |
|
"epoch": 0.33, |
|
"learning_rate": 0.0003366275770758464, |
|
"loss": 2.7064, |
|
"theoretical_loss": 3.6731751912305914, |
|
"tokens_seen": 933888000 |
|
}, |
|
{ |
|
"epoch": 0.33, |
|
"learning_rate": 0.000336036504634008, |
|
"loss": 2.7417, |
|
"theoretical_loss": 3.671935876601547, |
|
"tokens_seen": 937164800 |
|
}, |
|
{ |
|
"epoch": 0.34, |
|
"learning_rate": 0.0003354454321921695, |
|
"loss": 2.6845, |
|
"theoretical_loss": 3.6707020961804715, |
|
"tokens_seen": 940441600 |
|
}, |
|
{ |
|
"epoch": 0.34, |
|
"learning_rate": 0.000334854359750331, |
|
"loss": 2.7397, |
|
"theoretical_loss": 3.6694738060999468, |
|
"tokens_seen": 943718400 |
|
}, |
|
{ |
|
"epoch": 0.34, |
|
"learning_rate": 0.00033426328730849254, |
|
"loss": 2.7525, |
|
"theoretical_loss": 3.668250962991049, |
|
"tokens_seen": 946995200 |
|
}, |
|
{ |
|
"epoch": 0.34, |
|
"learning_rate": 0.00033367221486665406, |
|
"loss": 2.757, |
|
"theoretical_loss": 3.667033523975983, |
|
"tokens_seen": 950272000 |
|
}, |
|
{ |
|
"epoch": 0.34, |
|
"learning_rate": 0.0003330811424248156, |
|
"loss": 2.7335, |
|
"theoretical_loss": 3.66582144666085, |
|
"tokens_seen": 953548800 |
|
}, |
|
{ |
|
"epoch": 0.34, |
|
"learning_rate": 0.0003324900699829771, |
|
"loss": 2.7369, |
|
"theoretical_loss": 3.664614689128546, |
|
"tokens_seen": 956825600 |
|
}, |
|
{ |
|
"epoch": 0.34, |
|
"learning_rate": 0.0003318989975411387, |
|
"loss": 2.7314, |
|
"theoretical_loss": 3.6634132099317886, |
|
"tokens_seen": 960102400 |
|
}, |
|
{ |
|
"epoch": 0.34, |
|
"learning_rate": 0.0003313079250993002, |
|
"loss": 2.7448, |
|
"theoretical_loss": 3.662216968086267, |
|
"tokens_seen": 963379200 |
|
}, |
|
{ |
|
"epoch": 0.35, |
|
"learning_rate": 0.0003307168526574617, |
|
"loss": 2.7734, |
|
"theoretical_loss": 3.6610259230639217, |
|
"tokens_seen": 966656000 |
|
}, |
|
{ |
|
"epoch": 0.35, |
|
"learning_rate": 0.00033012578021562324, |
|
"loss": 2.8147, |
|
"theoretical_loss": 3.659840034786333, |
|
"tokens_seen": 969932800 |
|
}, |
|
{ |
|
"epoch": 0.35, |
|
"learning_rate": 0.00032953470777378476, |
|
"loss": 2.8769, |
|
"theoretical_loss": 3.6586592636182376, |
|
"tokens_seen": 973209600 |
|
}, |
|
{ |
|
"epoch": 0.35, |
|
"learning_rate": 0.0003289436353319463, |
|
"loss": 2.866, |
|
"theoretical_loss": 3.6574835703611566, |
|
"tokens_seen": 976486400 |
|
}, |
|
{ |
|
"epoch": 0.35, |
|
"learning_rate": 0.00032835256289010786, |
|
"loss": 2.8567, |
|
"theoretical_loss": 3.6563129162471313, |
|
"tokens_seen": 979763200 |
|
}, |
|
{ |
|
"debugging/Self-BLEU-5": 0.4286046663919377, |
|
"debugging/distinct-1-grams": 0.8147567798871364, |
|
"debugging/distinct-2-grams": 0.9823269374342457, |
|
"debugging/entropy-1-grams": 6.1671920556004824, |
|
"debugging/entropy-2-grams": 6.947028138756313, |
|
"debugging/length": 477.53333333333336, |
|
"debugging/num_segments": 15, |
|
"epoch": 0.35, |
|
"objective/train/docs_used": 560408, |
|
"objective/train/instantaneous_batch_size": 32, |
|
"objective/train/instantaneous_microbatch_size": 32768, |
|
"objective/train/original_loss": 2.931519031524658, |
|
"objective/train/theoretical_loss": 3.6551472629325787, |
|
"objective/train/tokens_used": 1003500000, |
|
"theoretical_loss": 3.6551472629325787, |
|
"tokens_seen": 983040000 |
|
}, |
|
{ |
|
"epoch": 0.35, |
|
"learning_rate": 0.0003277614904482694, |
|
"loss": 2.8554, |
|
"theoretical_loss": 3.6551472629325787, |
|
"tokens_seen": 983040000 |
|
}, |
|
{ |
|
"epoch": 0.35, |
|
"learning_rate": 0.0003271704180064309, |
|
"loss": 2.8527, |
|
"theoretical_loss": 3.653986572492247, |
|
"tokens_seen": 986316800 |
|
}, |
|
{ |
|
"epoch": 0.35, |
|
"learning_rate": 0.0003265793455645924, |
|
"loss": 2.8227, |
|
"theoretical_loss": 3.65283080741328, |
|
"tokens_seen": 989593600 |
|
}, |
|
{ |
|
"epoch": 0.35, |
|
"learning_rate": 0.00032598827312275394, |
|
"loss": 2.834, |
|
"theoretical_loss": 3.6516799305893866, |
|
"tokens_seen": 992870400 |
|
}, |
|
{ |
|
"epoch": 0.36, |
|
"learning_rate": 0.00032539720068091546, |
|
"loss": 2.8239, |
|
"theoretical_loss": 3.6505339053151076, |
|
"tokens_seen": 996147200 |
|
}, |
|
{ |
|
"epoch": 0.36, |
|
"learning_rate": 0.00032480612823907704, |
|
"loss": 2.8332, |
|
"theoretical_loss": 3.649392695280186, |
|
"tokens_seen": 999424000 |
|
}, |
|
{ |
|
"epoch": 0.36, |
|
"learning_rate": 0.00032421505579723856, |
|
"loss": 2.7962, |
|
"theoretical_loss": 3.6482562645640337, |
|
"tokens_seen": 1002700800 |
|
}, |
|
{ |
|
"epoch": 0.36, |
|
"learning_rate": 0.0003236239833554001, |
|
"loss": 2.8049, |
|
"theoretical_loss": 3.6471245776302883, |
|
"tokens_seen": 1005977600 |
|
}, |
|
{ |
|
"epoch": 0.36, |
|
"learning_rate": 0.00032303291091356155, |
|
"loss": 2.837, |
|
"theoretical_loss": 3.6459975993214724, |
|
"tokens_seen": 1009254400 |
|
}, |
|
{ |
|
"epoch": 0.36, |
|
"learning_rate": 0.00032244183847172307, |
|
"loss": 2.8015, |
|
"theoretical_loss": 3.6448752948537377, |
|
"tokens_seen": 1012531200 |
|
}, |
|
{ |
|
"epoch": 0.36, |
|
"learning_rate": 0.0003218507660298846, |
|
"loss": 2.8375, |
|
"theoretical_loss": 3.6437576298116996, |
|
"tokens_seen": 1015808000 |
|
}, |
|
{ |
|
"epoch": 0.36, |
|
"learning_rate": 0.0003212596935880461, |
|
"loss": 2.8268, |
|
"theoretical_loss": 3.6426445701433607, |
|
"tokens_seen": 1019084800 |
|
}, |
|
{ |
|
"epoch": 0.37, |
|
"learning_rate": 0.0003206686211462077, |
|
"loss": 2.8573, |
|
"theoretical_loss": 3.6415360821551226, |
|
"tokens_seen": 1022361600 |
|
}, |
|
{ |
|
"epoch": 0.37, |
|
"learning_rate": 0.0003200775487043692, |
|
"loss": 2.8034, |
|
"theoretical_loss": 3.6404321325068754, |
|
"tokens_seen": 1025638400 |
|
}, |
|
{ |
|
"epoch": 0.37, |
|
"learning_rate": 0.0003194864762625307, |
|
"loss": 2.8282, |
|
"theoretical_loss": 3.639332688207178, |
|
"tokens_seen": 1028915200 |
|
}, |
|
{ |
|
"epoch": 0.37, |
|
"learning_rate": 0.00031889540382069225, |
|
"loss": 2.8054, |
|
"theoretical_loss": 3.6382377166085096, |
|
"tokens_seen": 1032192000 |
|
}, |
|
{ |
|
"epoch": 0.37, |
|
"learning_rate": 0.00031830433137885377, |
|
"loss": 2.76, |
|
"theoretical_loss": 3.6371471854026147, |
|
"tokens_seen": 1035468800 |
|
}, |
|
{ |
|
"epoch": 0.37, |
|
"learning_rate": 0.0003177132589370153, |
|
"loss": 2.7697, |
|
"theoretical_loss": 3.6360610626159087, |
|
"tokens_seen": 1038745600 |
|
}, |
|
{ |
|
"epoch": 0.37, |
|
"learning_rate": 0.00031712218649517686, |
|
"loss": 2.7396, |
|
"theoretical_loss": 3.634979316604973, |
|
"tokens_seen": 1042022400 |
|
}, |
|
{ |
|
"epoch": 0.37, |
|
"learning_rate": 0.0003165311140533384, |
|
"loss": 2.7543, |
|
"theoretical_loss": 3.6339019160521198, |
|
"tokens_seen": 1045299200 |
|
}, |
|
{ |
|
"epoch": 0.37, |
|
"learning_rate": 0.0003159400416114999, |
|
"loss": 2.7494, |
|
"theoretical_loss": 3.632828829961029, |
|
"tokens_seen": 1048576000 |
|
}, |
|
{ |
|
"epoch": 0.38, |
|
"learning_rate": 0.0003153489691696614, |
|
"loss": 2.7597, |
|
"theoretical_loss": 3.631760027652461, |
|
"tokens_seen": 1051852800 |
|
}, |
|
{ |
|
"epoch": 0.38, |
|
"learning_rate": 0.00031475789672782295, |
|
"loss": 2.7859, |
|
"theoretical_loss": 3.630695478760034, |
|
"tokens_seen": 1055129600 |
|
}, |
|
{ |
|
"epoch": 0.38, |
|
"learning_rate": 0.00031416682428598447, |
|
"loss": 2.7618, |
|
"theoretical_loss": 3.6296351532260767, |
|
"tokens_seen": 1058406400 |
|
}, |
|
{ |
|
"epoch": 0.38, |
|
"learning_rate": 0.00031357575184414604, |
|
"loss": 2.7557, |
|
"theoretical_loss": 3.6285790212975435, |
|
"tokens_seen": 1061683200 |
|
}, |
|
{ |
|
"epoch": 0.38, |
|
"learning_rate": 0.00031298467940230756, |
|
"loss": 2.7322, |
|
"theoretical_loss": 3.6275270535220008, |
|
"tokens_seen": 1064960000 |
|
}, |
|
{ |
|
"epoch": 0.38, |
|
"learning_rate": 0.0003123936069604691, |
|
"loss": 2.7615, |
|
"theoretical_loss": 3.626479220743673, |
|
"tokens_seen": 1068236800 |
|
}, |
|
{ |
|
"epoch": 0.38, |
|
"learning_rate": 0.0003118379988651409, |
|
"loss": 2.7742, |
|
"theoretical_loss": 3.6254354940995586, |
|
"tokens_seen": 1071513600 |
|
}, |
|
{ |
|
"epoch": 0.38, |
|
"learning_rate": 0.00031124692642330243, |
|
"loss": 2.7746, |
|
"theoretical_loss": 3.624395845015602, |
|
"tokens_seen": 1074790400 |
|
}, |
|
{ |
|
"epoch": 0.39, |
|
"learning_rate": 0.00031065585398146395, |
|
"loss": 2.7173, |
|
"theoretical_loss": 3.6233602452029348, |
|
"tokens_seen": 1078067200 |
|
}, |
|
{ |
|
"epoch": 0.39, |
|
"learning_rate": 0.00031006478153962553, |
|
"loss": 2.7272, |
|
"theoretical_loss": 3.6223286666541683, |
|
"tokens_seen": 1081344000 |
|
}, |
|
{ |
|
"epoch": 0.39, |
|
"learning_rate": 0.00030947370909778705, |
|
"loss": 2.7576, |
|
"theoretical_loss": 3.621301081639753, |
|
"tokens_seen": 1084620800 |
|
}, |
|
{ |
|
"epoch": 0.39, |
|
"learning_rate": 0.00030888263665594857, |
|
"loss": 2.7948, |
|
"theoretical_loss": 3.6202774627043923, |
|
"tokens_seen": 1087897600 |
|
}, |
|
{ |
|
"epoch": 0.39, |
|
"learning_rate": 0.0003082915642141101, |
|
"loss": 2.752, |
|
"theoretical_loss": 3.619257782663513, |
|
"tokens_seen": 1091174400 |
|
}, |
|
{ |
|
"epoch": 0.39, |
|
"learning_rate": 0.0003077004917722716, |
|
"loss": 2.7598, |
|
"theoretical_loss": 3.618242014599793, |
|
"tokens_seen": 1094451200 |
|
}, |
|
{ |
|
"epoch": 0.39, |
|
"learning_rate": 0.00030710941933043313, |
|
"loss": 2.7445, |
|
"theoretical_loss": 3.617230131859743, |
|
"tokens_seen": 1097728000 |
|
}, |
|
{ |
|
"epoch": 0.39, |
|
"learning_rate": 0.00030651834688859465, |
|
"loss": 2.7298, |
|
"theoretical_loss": 3.6162221080503416, |
|
"tokens_seen": 1101004800 |
|
}, |
|
{ |
|
"epoch": 0.39, |
|
"learning_rate": 0.00030592727444675623, |
|
"loss": 2.7152, |
|
"theoretical_loss": 3.615217917035726, |
|
"tokens_seen": 1104281600 |
|
}, |
|
{ |
|
"epoch": 0.4, |
|
"learning_rate": 0.00030533620200491775, |
|
"loss": 2.7476, |
|
"theoretical_loss": 3.614217532933929, |
|
"tokens_seen": 1107558400 |
|
}, |
|
{ |
|
"epoch": 0.4, |
|
"learning_rate": 0.00030474512956307927, |
|
"loss": 2.735, |
|
"theoretical_loss": 3.6132209301136715, |
|
"tokens_seen": 1110835200 |
|
}, |
|
{ |
|
"epoch": 0.4, |
|
"learning_rate": 0.0003041540571212408, |
|
"loss": 2.7189, |
|
"theoretical_loss": 3.612228083191205, |
|
"tokens_seen": 1114112000 |
|
}, |
|
{ |
|
"epoch": 0.4, |
|
"learning_rate": 0.0003035629846794023, |
|
"loss": 2.7788, |
|
"theoretical_loss": 3.611238967027199, |
|
"tokens_seen": 1117388800 |
|
} |
|
], |
|
"max_steps": 42724, |
|
"num_train_epochs": 9223372036854775807, |
|
"total_flos": 5.71514655080448e+17, |
|
"trial_name": null, |
|
"trial_params": null |
|
} |
|
|