Transformers · PyTorch · English · gpt2 · Generated from Trainer · text-generation-inference · Inference Endpoints
pedantic_bhabha / checkpoint-500 / trainer_state.json
Commit 07b8b79: Training in progress, step 500
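The trainer_state.json contents follow. As a minimal sketch for inspecting a file like this one (assuming only the Python standard library and that the file has been downloaded locally as trainer_state.json; the path and the five-row preview are illustrative choices, not part of the checkpoint), the per-step loss curve can be pulled out of log_history like so:

import json

# Load the checkpoint's trainer state; the local path is an assumption for illustration.
with open("trainer_state.json") as f:
    state = json.load(f)

# Keep only the per-step optimizer logs; the interleaved entries carry
# objective/train/* metrics instead of "loss" and "learning_rate".
steps = [e for e in state["log_history"] if "loss" in e and "learning_rate" in e]

# Print a short preview of tokens seen, observed loss, and the logged theoretical loss.
for entry in steps[:5]:
    print(entry["tokens_seen"], entry["loss"], entry["theoretical_loss"])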
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.009929697740000794,
"global_step": 500,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0,
"learning_rate": 9.92063492063492e-07,
"loss": 10.8568,
"theoretical_loss": 20.812814784551147,
"tokens_seen": 65536
},
{
"epoch": 0.0,
"learning_rate": 1.984126984126984e-06,
"loss": 10.8727,
"theoretical_loss": 17.56619808733264,
"tokens_seen": 131072
},
{
"epoch": 0.0,
"learning_rate": 2.9761904761904763e-06,
"loss": 10.7356,
"theoretical_loss": 15.939474075840563,
"tokens_seen": 196608
},
{
"epoch": 0.0,
"learning_rate": 3.968253968253968e-06,
"loss": 10.7113,
"theoretical_loss": 14.892313738992565,
"tokens_seen": 262144
},
{
"epoch": 0.0,
"learning_rate": 4.96031746031746e-06,
"loss": 10.3498,
"theoretical_loss": 14.13621392076697,
"tokens_seen": 327680
},
{
"epoch": 0.0,
"learning_rate": 5.9523809523809525e-06,
"loss": 10.1826,
"theoretical_loss": 13.552558455554218,
"tokens_seen": 393216
},
{
"epoch": 0.0,
"learning_rate": 6.944444444444444e-06,
"loss": 10.0176,
"theoretical_loss": 13.081805984405184,
"tokens_seen": 458752
},
{
"epoch": 0.0,
"learning_rate": 7.936507936507936e-06,
"loss": 9.6515,
"theoretical_loss": 12.690126608487319,
"tokens_seen": 524288
},
{
"epoch": 0.0,
"learning_rate": 8.928571428571428e-06,
"loss": 9.2598,
"theoretical_loss": 12.356589446877619,
"tokens_seen": 589824
},
{
"epoch": 0.0,
"learning_rate": 9.92063492063492e-06,
"loss": 9.1219,
"theoretical_loss": 12.067409590039073,
"tokens_seen": 655360
},
{
"epoch": 0.0,
"learning_rate": 1.0912698412698412e-05,
"loss": 9.2934,
"theoretical_loss": 11.813063214105672,
"tokens_seen": 720896
},
{
"epoch": 0.0,
"learning_rate": 1.1904761904761905e-05,
"loss": 9.0973,
"theoretical_loss": 11.586716191710725,
"tokens_seen": 786432
},
{
"epoch": 0.0,
"learning_rate": 1.2896825396825396e-05,
"loss": 9.4804,
"theoretical_loss": 11.383311123190783,
"tokens_seen": 851968
},
{
"epoch": 0.0,
"learning_rate": 1.3888888888888888e-05,
"loss": 9.3208,
"theoretical_loss": 11.199008685115867,
"tokens_seen": 917504
},
{
"epoch": 0.0,
"learning_rate": 1.4880952380952381e-05,
"loss": 9.1864,
"theoretical_loss": 11.030830900981908,
"tokens_seen": 983040
},
{
"epoch": 0.0,
"learning_rate": 1.5873015873015872e-05,
"loss": 9.398,
"theoretical_loss": 10.876425069460945,
"tokens_seen": 1048576
},
{
"epoch": 0.0,
"learning_rate": 1.6865079365079364e-05,
"loss": 8.9394,
"theoretical_loss": 10.73390272306672,
"tokens_seen": 1114112
},
{
"epoch": 0.0,
"learning_rate": 1.7857142857142855e-05,
"loss": 9.0636,
"theoretical_loss": 10.601726859234274,
"tokens_seen": 1179648
},
{
"epoch": 0.0,
"learning_rate": 1.884920634920635e-05,
"loss": 9.1583,
"theoretical_loss": 10.478631155360636,
"tokens_seen": 1245184
},
{
"epoch": 0.0,
"learning_rate": 1.984126984126984e-05,
"loss": 9.1124,
"theoretical_loss": 10.363560926767326,
"tokens_seen": 1310720
},
{
"epoch": 0.0,
"learning_rate": 2.0833333333333333e-05,
"loss": 9.0087,
"theoretical_loss": 10.25562920390074,
"tokens_seen": 1376256
},
{
"epoch": 0.0,
"learning_rate": 2.1825396825396824e-05,
"loss": 9.0788,
"theoretical_loss": 10.154083536274014,
"tokens_seen": 1441792
},
{
"epoch": 0.0,
"learning_rate": 2.2817460317460315e-05,
"loss": 8.4406,
"theoretical_loss": 10.058280544736593,
"tokens_seen": 1507328
},
{
"epoch": 0.0,
"learning_rate": 2.380952380952381e-05,
"loss": 8.9754,
"theoretical_loss": 9.967666161844274,
"tokens_seen": 1572864
},
{
"epoch": 0.0,
"objective/train/docs_used": 12267,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 8.854658126831055,
"objective/train/theoretical_loss": 9.881760109397105,
"objective/train/tokens_used": 22098400,
"theoretical_loss": 9.881760109397105,
"tokens_seen": 1638400
},
{
"epoch": 0.0,
"learning_rate": 2.48015873015873e-05,
"loss": 8.9164,
"theoretical_loss": 9.881760109397105,
"tokens_seen": 1638400
},
{
"epoch": 0.0,
"learning_rate": 2.5793650793650793e-05,
"loss": 7.9452,
"theoretical_loss": 9.800143574544556,
"tokens_seen": 1703936
},
{
"epoch": 0.0,
"learning_rate": 2.6785714285714284e-05,
"loss": 8.81,
"theoretical_loss": 9.722449329911441,
"tokens_seen": 1769472
},
{
"epoch": 0.0,
"learning_rate": 2.7777777777777776e-05,
"loss": 9.0741,
"theoretical_loss": 9.648353742085542,
"tokens_seen": 1835008
},
{
"epoch": 0.0,
"learning_rate": 2.876984126984127e-05,
"loss": 8.8256,
"theoretical_loss": 9.577570254149634,
"tokens_seen": 1900544
},
{
"epoch": 0.0,
"learning_rate": 2.9761904761904762e-05,
"loss": 8.3274,
"theoretical_loss": 9.509844029768848,
"tokens_seen": 1966080
},
{
"epoch": 0.0,
"learning_rate": 3.075396825396825e-05,
"loss": 8.55,
"theoretical_loss": 9.44494752063593,
"tokens_seen": 2031616
},
{
"epoch": 0.0,
"learning_rate": 3.1746031746031745e-05,
"loss": 8.0979,
"theoretical_loss": 9.382676773914453,
"tokens_seen": 2097152
},
{
"epoch": 0.0,
"learning_rate": 3.273809523809524e-05,
"loss": 8.3374,
"theoretical_loss": 9.322848337237977,
"tokens_seen": 2162688
},
{
"epoch": 0.0,
"learning_rate": 3.373015873015873e-05,
"loss": 8.5177,
"theoretical_loss": 9.26529664966427,
"tokens_seen": 2228224
},
{
"epoch": 0.0,
"learning_rate": 3.472222222222222e-05,
"loss": 8.6686,
"theoretical_loss": 9.209871830448751,
"tokens_seen": 2293760
},
{
"epoch": 0.0,
"learning_rate": 3.571428571428571e-05,
"loss": 8.675,
"theoretical_loss": 9.156437795512288,
"tokens_seen": 2359296
},
{
"epoch": 0.0,
"learning_rate": 3.670634920634921e-05,
"loss": 8.4694,
"theoretical_loss": 9.104870645417346,
"tokens_seen": 2424832
},
{
"epoch": 0.0,
"learning_rate": 3.76984126984127e-05,
"loss": 8.2407,
"theoretical_loss": 9.05505727953773,
"tokens_seen": 2490368
},
{
"epoch": 0.0,
"learning_rate": 3.8690476190476195e-05,
"loss": 8.5934,
"theoretical_loss": 9.006894199647824,
"tokens_seen": 2555904
},
{
"epoch": 0.0,
"learning_rate": 3.968253968253968e-05,
"loss": 7.8491,
"theoretical_loss": 8.960286472913351,
"tokens_seen": 2621440
},
{
"epoch": 0.0,
"learning_rate": 4.067460317460318e-05,
"loss": 7.8615,
"theoretical_loss": 8.915146829644607,
"tokens_seen": 2686976
},
{
"epoch": 0.0,
"learning_rate": 4.1666666666666665e-05,
"loss": 7.7589,
"theoretical_loss": 8.871394875482219,
"tokens_seen": 2752512
},
{
"epoch": 0.0,
"learning_rate": 4.265873015873016e-05,
"loss": 8.0014,
"theoretical_loss": 8.828956401157495,
"tokens_seen": 2818048
},
{
"epoch": 0.0,
"learning_rate": 4.365079365079365e-05,
"loss": 7.8309,
"theoretical_loss": 8.787762775782408,
"tokens_seen": 2883584
},
{
"epoch": 0.0,
"learning_rate": 4.464285714285714e-05,
"loss": 8.259,
"theoretical_loss": 8.747750411915451,
"tokens_seen": 2949120
},
{
"epoch": 0.0,
"learning_rate": 4.563492063492063e-05,
"loss": 8.1107,
"theoretical_loss": 8.708860292524829,
"tokens_seen": 3014656
},
{
"epoch": 0.0,
"learning_rate": 4.6626984126984126e-05,
"loss": 8.2947,
"theoretical_loss": 8.671037551512843,
"tokens_seen": 3080192
},
{
"epoch": 0.0,
"learning_rate": 4.761904761904762e-05,
"loss": 7.764,
"theoretical_loss": 8.63423110073947,
"tokens_seen": 3145728
},
{
"epoch": 0.0,
"learning_rate": 4.8611111111111115e-05,
"loss": 8.1678,
"theoretical_loss": 8.59839329754032,
"tokens_seen": 3211264
},
{
"epoch": 0.0,
"objective/train/docs_used": 13483,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 8.108365058898926,
"objective/train/theoretical_loss": 8.563479647615063,
"objective/train/tokens_used": 23736800,
"theoretical_loss": 8.563479647615063,
"tokens_seen": 3276800
},
{
"epoch": 0.0,
"learning_rate": 4.96031746031746e-05,
"loss": 7.5801,
"theoretical_loss": 8.563479647615063,
"tokens_seen": 3276800
},
{
"epoch": 0.0,
"learning_rate": 5.05952380952381e-05,
"loss": 7.8434,
"theoretical_loss": 8.52944853889911,
"tokens_seen": 3342336
},
{
"epoch": 0.0,
"learning_rate": 5.1587301587301586e-05,
"loss": 7.8492,
"theoretical_loss": 8.496261002649998,
"tokens_seen": 3407872
},
{
"epoch": 0.0,
"learning_rate": 5.257936507936508e-05,
"loss": 7.8312,
"theoretical_loss": 8.463880498501183,
"tokens_seen": 3473408
},
{
"epoch": 0.0,
"learning_rate": 5.357142857142857e-05,
"loss": 7.3015,
"theoretical_loss": 8.432272720676774,
"tokens_seen": 3538944
},
{
"epoch": 0.0,
"learning_rate": 5.4563492063492063e-05,
"loss": 7.9474,
"theoretical_loss": 8.401405422934712,
"tokens_seen": 3604480
},
{
"epoch": 0.0,
"learning_rate": 5.555555555555555e-05,
"loss": 7.6148,
"theoretical_loss": 8.371248260124204,
"tokens_seen": 3670016
},
{
"epoch": 0.0,
"learning_rate": 5.6547619047619046e-05,
"loss": 7.5108,
"theoretical_loss": 8.341772644515071,
"tokens_seen": 3735552
},
{
"epoch": 0.0,
"learning_rate": 5.753968253968254e-05,
"loss": 7.6168,
"theoretical_loss": 8.312951615289325,
"tokens_seen": 3801088
},
{
"epoch": 0.0,
"learning_rate": 5.8531746031746036e-05,
"loss": 7.8436,
"theoretical_loss": 8.284759719785177,
"tokens_seen": 3866624
},
{
"epoch": 0.0,
"learning_rate": 5.9523809523809524e-05,
"loss": 7.0651,
"theoretical_loss": 8.257172905255858,
"tokens_seen": 3932160
},
{
"epoch": 0.0,
"learning_rate": 6.051587301587302e-05,
"loss": 7.4859,
"theoretical_loss": 8.23016842005411,
"tokens_seen": 3997696
},
{
"epoch": 0.0,
"learning_rate": 6.15079365079365e-05,
"loss": 7.531,
"theoretical_loss": 8.203724723281965,
"tokens_seen": 4063232
},
{
"epoch": 0.0,
"learning_rate": 6.25e-05,
"loss": 7.5815,
"theoretical_loss": 8.177821402057042,
"tokens_seen": 4128768
},
{
"epoch": 0.0,
"learning_rate": 6.349206349206349e-05,
"loss": 7.2471,
"theoretical_loss": 8.152439095643611,
"tokens_seen": 4194304
},
{
"epoch": 0.0,
"learning_rate": 6.448412698412699e-05,
"loss": 7.4488,
"theoretical_loss": 8.127559425781495,
"tokens_seen": 4259840
},
{
"epoch": 0.0,
"learning_rate": 6.547619047619048e-05,
"loss": 7.4517,
"theoretical_loss": 8.103164932619705,
"tokens_seen": 4325376
},
{
"epoch": 0.0,
"learning_rate": 6.646825396825397e-05,
"loss": 7.1039,
"theoretical_loss": 8.079239015726635,
"tokens_seen": 4390912
},
{
"epoch": 0.0,
"learning_rate": 6.746031746031745e-05,
"loss": 7.1279,
"theoretical_loss": 8.055765879705412,
"tokens_seen": 4456448
},
{
"epoch": 0.0,
"learning_rate": 6.845238095238096e-05,
"loss": 7.3354,
"theoretical_loss": 8.032730483993003,
"tokens_seen": 4521984
},
{
"epoch": 0.0,
"learning_rate": 6.944444444444444e-05,
"loss": 7.1485,
"theoretical_loss": 8.01011849646583,
"tokens_seen": 4587520
},
{
"epoch": 0.0,
"learning_rate": 7.043650793650793e-05,
"loss": 6.9851,
"theoretical_loss": 7.987916250513374,
"tokens_seen": 4653056
},
{
"epoch": 0.0,
"learning_rate": 7.142857142857142e-05,
"loss": 6.9432,
"theoretical_loss": 7.966110705275796,
"tokens_seen": 4718592
},
{
"epoch": 0.0,
"learning_rate": 7.242063492063492e-05,
"loss": 6.9933,
"theoretical_loss": 7.944689408771984,
"tokens_seen": 4784128
},
{
"epoch": 0.0,
"learning_rate": 7.341269841269842e-05,
"loss": 7.0325,
"theoretical_loss": 7.923640463671514,
"tokens_seen": 4849664
},
{
"epoch": 0.0,
"objective/train/docs_used": 14033,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 6.057826519012451,
"objective/train/theoretical_loss": 7.902952495488062,
"objective/train/tokens_used": 25375200,
"theoretical_loss": 7.902952495488062,
"tokens_seen": 4915200
},
{
"epoch": 0.0,
"learning_rate": 7.440476190476191e-05,
"loss": 6.7791,
"theoretical_loss": 7.902952495488062,
"tokens_seen": 4915200
},
{
"epoch": 0.0,
"learning_rate": 7.53968253968254e-05,
"loss": 6.7978,
"theoretical_loss": 7.882614622993199,
"tokens_seen": 4980736
},
{
"epoch": 0.0,
"learning_rate": 7.63888888888889e-05,
"loss": 6.8558,
"theoretical_loss": 7.8626164306686235,
"tokens_seen": 5046272
},
{
"epoch": 0.0,
"learning_rate": 7.738095238095239e-05,
"loss": 6.9141,
"theoretical_loss": 7.842947943031932,
"tokens_seen": 5111808
},
{
"epoch": 0.0,
"learning_rate": 7.837301587301588e-05,
"loss": 6.9255,
"theoretical_loss": 7.8235996006863076,
"tokens_seen": 5177344
},
{
"epoch": 0.0,
"learning_rate": 7.936507936507937e-05,
"loss": 6.7895,
"theoretical_loss": 7.8045622379581605,
"tokens_seen": 5242880
},
{
"epoch": 0.0,
"learning_rate": 8.035714285714287e-05,
"loss": 6.7059,
"theoretical_loss": 7.785827061999068,
"tokens_seen": 5308416
},
{
"epoch": 0.0,
"learning_rate": 8.134920634920635e-05,
"loss": 6.815,
"theoretical_loss": 7.767385633239359,
"tokens_seen": 5373952
},
{
"epoch": 0.0,
"learning_rate": 8.234126984126984e-05,
"loss": 6.8445,
"theoretical_loss": 7.749229847090614,
"tokens_seen": 5439488
},
{
"epoch": 0.0,
"learning_rate": 8.333333333333333e-05,
"loss": 6.7849,
"theoretical_loss": 7.731351916803314,
"tokens_seen": 5505024
},
{
"epoch": 0.0,
"learning_rate": 8.432539682539683e-05,
"loss": 6.5349,
"theoretical_loss": 7.7137443573939155,
"tokens_seen": 5570560
},
{
"epoch": 0.0,
"learning_rate": 8.531746031746032e-05,
"loss": 6.5103,
"theoretical_loss": 7.696399970562929,
"tokens_seen": 5636096
},
{
"epoch": 0.0,
"learning_rate": 8.630952380952381e-05,
"loss": 6.9307,
"theoretical_loss": 7.679311830532177,
"tokens_seen": 5701632
},
{
"epoch": 0.0,
"learning_rate": 8.73015873015873e-05,
"loss": 6.8115,
"theoretical_loss": 7.662473270735324,
"tokens_seen": 5767168
},
{
"epoch": 0.0,
"learning_rate": 8.82936507936508e-05,
"loss": 6.6601,
"theoretical_loss": 7.645877871301275,
"tokens_seen": 5832704
},
{
"epoch": 0.0,
"learning_rate": 8.928571428571429e-05,
"loss": 6.2238,
"theoretical_loss": 7.6295194472748555,
"tokens_seen": 5898240
},
{
"epoch": 0.0,
"learning_rate": 9.027777777777777e-05,
"loss": 6.6176,
"theoretical_loss": 7.613392037523692,
"tokens_seen": 5963776
},
{
"epoch": 0.0,
"learning_rate": 9.126984126984126e-05,
"loss": 6.5932,
"theoretical_loss": 7.597489894284275,
"tokens_seen": 6029312
},
{
"epoch": 0.0,
"learning_rate": 9.226190476190476e-05,
"loss": 6.51,
"theoretical_loss": 7.581807473303883,
"tokens_seen": 6094848
},
{
"epoch": 0.0,
"learning_rate": 9.325396825396825e-05,
"loss": 6.3768,
"theoretical_loss": 7.566339424538366,
"tokens_seen": 6160384
},
{
"epoch": 0.0,
"learning_rate": 9.424603174603175e-05,
"loss": 6.5465,
"theoretical_loss": 7.551080583368945,
"tokens_seen": 6225920
},
{
"epoch": 0.0,
"learning_rate": 9.523809523809524e-05,
"loss": 6.8931,
"theoretical_loss": 7.536025962303915,
"tokens_seen": 6291456
},
{
"epoch": 0.0,
"learning_rate": 9.623015873015874e-05,
"loss": 6.5266,
"theoretical_loss": 7.521170743133757,
"tokens_seen": 6356992
},
{
"epoch": 0.0,
"learning_rate": 9.722222222222223e-05,
"loss": 6.4856,
"theoretical_loss": 7.506510269510493,
"tokens_seen": 6422528
},
{
"epoch": 0.0,
"learning_rate": 9.821428571428572e-05,
"loss": 6.4763,
"theoretical_loss": 7.492040039924245,
"tokens_seen": 6488064
},
{
"epoch": 0.0,
"objective/train/docs_used": 15357,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 6.699707984924316,
"objective/train/theoretical_loss": 7.4777557010520255,
"objective/train/tokens_used": 27013600,
"theoretical_loss": 7.4777557010520255,
"tokens_seen": 6553600
},
{
"epoch": 0.0,
"learning_rate": 9.92063492063492e-05,
"loss": 6.6532,
"theoretical_loss": 7.4777557010520255,
"tokens_seen": 6553600
},
{
"epoch": 0.0,
"learning_rate": 0.00010019841269841271,
"loss": 6.4,
"theoretical_loss": 7.463653041455457,
"tokens_seen": 6619136
},
{
"epoch": 0.0,
"learning_rate": 0.0001011904761904762,
"loss": 6.2462,
"theoretical_loss": 7.449727985605911,
"tokens_seen": 6684672
},
{
"epoch": 0.0,
"learning_rate": 0.00010218253968253968,
"loss": 6.6399,
"theoretical_loss": 7.435976588217015,
"tokens_seen": 6750208
},
{
"epoch": 0.0,
"learning_rate": 0.00010317460317460317,
"loss": 6.1887,
"theoretical_loss": 7.4223950288659,
"tokens_seen": 6815744
},
{
"epoch": 0.0,
"learning_rate": 0.00010416666666666667,
"loss": 6.4129,
"theoretical_loss": 7.40897960688587,
"tokens_seen": 6881280
},
{
"epoch": 0.0,
"learning_rate": 0.00010515873015873016,
"loss": 6.2296,
"theoretical_loss": 7.395726736514341,
"tokens_seen": 6946816
},
{
"epoch": 0.0,
"learning_rate": 0.00010615079365079365,
"loss": 6.3505,
"theoretical_loss": 7.3826329422810275,
"tokens_seen": 7012352
},
{
"epoch": 0.0,
"learning_rate": 0.00010714285714285714,
"loss": 6.5043,
"theoretical_loss": 7.369694854622368,
"tokens_seen": 7077888
},
{
"epoch": 0.0,
"learning_rate": 0.00010813492063492064,
"loss": 6.3689,
"theoretical_loss": 7.3569092057090835,
"tokens_seen": 7143424
},
{
"epoch": 0.0,
"learning_rate": 0.00010912698412698413,
"loss": 6.5583,
"theoretical_loss": 7.3442728254746825,
"tokens_seen": 7208960
},
{
"epoch": 0.0,
"learning_rate": 0.00011011904761904761,
"loss": 6.3757,
"theoretical_loss": 7.3317826378335145,
"tokens_seen": 7274496
},
{
"epoch": 0.0,
"learning_rate": 0.0001111111111111111,
"loss": 6.3164,
"theoretical_loss": 7.319435657077673,
"tokens_seen": 7340032
},
{
"epoch": 0.0,
"learning_rate": 0.0001121031746031746,
"loss": 6.205,
"theoretical_loss": 7.307228984442819,
"tokens_seen": 7405568
},
{
"epoch": 0.0,
"learning_rate": 0.00011309523809523809,
"loss": 6.4975,
"theoretical_loss": 7.29515980483356,
"tokens_seen": 7471104
},
{
"epoch": 0.0,
"learning_rate": 0.00011408730158730158,
"loss": 6.087,
"theoretical_loss": 7.283225383699648,
"tokens_seen": 7536640
},
{
"epoch": 0.0,
"learning_rate": 0.00011507936507936508,
"loss": 6.0785,
"theoretical_loss": 7.271423064054828,
"tokens_seen": 7602176
},
{
"epoch": 0.0,
"learning_rate": 0.00011607142857142858,
"loss": 6.4949,
"theoretical_loss": 7.259750263630618,
"tokens_seen": 7667712
},
{
"epoch": 0.0,
"learning_rate": 0.00011706349206349207,
"loss": 6.2155,
"theoretical_loss": 7.248204472157866,
"tokens_seen": 7733248
},
{
"epoch": 0.0,
"learning_rate": 0.00011805555555555556,
"loss": 6.0229,
"theoretical_loss": 7.236783248769257,
"tokens_seen": 7798784
},
{
"epoch": 0.0,
"learning_rate": 0.00011904761904761905,
"loss": 6.6244,
"theoretical_loss": 7.225484219516492,
"tokens_seen": 7864320
},
{
"epoch": 0.0,
"learning_rate": 0.00012003968253968255,
"loss": 5.9387,
"theoretical_loss": 7.2143050749961155,
"tokens_seen": 7929856
},
{
"epoch": 0.0,
"learning_rate": 0.00012103174603174604,
"loss": 6.2842,
"theoretical_loss": 7.2032435680783955,
"tokens_seen": 7995392
},
{
"epoch": 0.0,
"learning_rate": 0.00012202380952380953,
"loss": 6.2047,
"theoretical_loss": 7.19229751173401,
"tokens_seen": 8060928
},
{
"epoch": 0.0,
"learning_rate": 0.000123015873015873,
"loss": 6.2571,
"theoretical_loss": 7.181464776953511,
"tokens_seen": 8126464
},
{
"epoch": 0.0,
"objective/train/docs_used": 16711,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 6.2001729011535645,
"objective/train/theoretical_loss": 7.1707432907549595,
"objective/train/tokens_used": 28652000,
"theoretical_loss": 7.1707432907549595,
"tokens_seen": 8192000
},
{
"epoch": 0.0,
"learning_rate": 0.0001240079365079365,
"loss": 6.0533,
"theoretical_loss": 7.1707432907549595,
"tokens_seen": 8192000
},
{
"epoch": 0.0,
"learning_rate": 0.000125,
"loss": 6.0526,
"theoretical_loss": 7.160131034275267,
"tokens_seen": 8257536
},
{
"epoch": 0.0,
"learning_rate": 0.0001259920634920635,
"loss": 6.0532,
"theoretical_loss": 7.149626040941133,
"tokens_seen": 8323072
},
{
"epoch": 0.0,
"learning_rate": 0.00012698412698412698,
"loss": 5.9719,
"theoretical_loss": 7.139226394715633,
"tokens_seen": 8388608
},
{
"epoch": 0.0,
"learning_rate": 0.00012797619047619048,
"loss": 6.0403,
"theoretical_loss": 7.12893022841679,
"tokens_seen": 8454144
},
{
"epoch": 0.0,
"learning_rate": 0.00012896825396825398,
"loss": 5.7847,
"theoretical_loss": 7.118735722104612,
"tokens_seen": 8519680
},
{
"epoch": 0.0,
"learning_rate": 0.00012996031746031748,
"loss": 5.9094,
"theoretical_loss": 7.10864110153334,
"tokens_seen": 8585216
},
{
"epoch": 0.0,
"learning_rate": 0.00013095238095238096,
"loss": 6.0872,
"theoretical_loss": 7.098644636665766,
"tokens_seen": 8650752
},
{
"epoch": 0.0,
"learning_rate": 0.00013194444444444446,
"loss": 6.0185,
"theoretical_loss": 7.088744640246688,
"tokens_seen": 8716288
},
{
"epoch": 0.0,
"learning_rate": 0.00013293650793650793,
"loss": 6.0768,
"theoretical_loss": 7.078939466432745,
"tokens_seen": 8781824
},
{
"epoch": 0.0,
"learning_rate": 0.00013392857142857144,
"loss": 6.3022,
"theoretical_loss": 7.069227509475962,
"tokens_seen": 8847360
},
{
"epoch": 0.0,
"learning_rate": 0.0001349206349206349,
"loss": 6.0881,
"theoretical_loss": 7.059607202458563,
"tokens_seen": 8912896
},
{
"epoch": 0.0,
"learning_rate": 0.0001359126984126984,
"loss": 5.7381,
"theoretical_loss": 7.0500770160766635,
"tokens_seen": 8978432
},
{
"epoch": 0.0,
"learning_rate": 0.0001369047619047619,
"loss": 5.9916,
"theoretical_loss": 7.04063545747062,
"tokens_seen": 9043968
},
{
"epoch": 0.0,
"learning_rate": 0.00013789682539682541,
"loss": 6.0983,
"theoretical_loss": 7.031281069099929,
"tokens_seen": 9109504
},
{
"epoch": 0.0,
"learning_rate": 0.0001388888888888889,
"loss": 6.0662,
"theoretical_loss": 7.022012427660673,
"tokens_seen": 9175040
},
{
"epoch": 0.0,
"learning_rate": 0.0001398809523809524,
"loss": 5.7078,
"theoretical_loss": 7.012828143043604,
"tokens_seen": 9240576
},
{
"epoch": 0.0,
"learning_rate": 0.00014087301587301586,
"loss": 6.2455,
"theoretical_loss": 7.003726857331067,
"tokens_seen": 9306112
},
{
"epoch": 0.0,
"learning_rate": 0.00014186507936507937,
"loss": 5.9609,
"theoretical_loss": 6.994707243831053,
"tokens_seen": 9371648
},
{
"epoch": 0.0,
"learning_rate": 0.00014285714285714284,
"loss": 5.9423,
"theoretical_loss": 6.985768006146776,
"tokens_seen": 9437184
},
{
"epoch": 0.0,
"learning_rate": 0.00014384920634920634,
"loss": 6.0329,
"theoretical_loss": 6.976907877280185,
"tokens_seen": 9502720
},
{
"epoch": 0.0,
"learning_rate": 0.00014484126984126984,
"loss": 5.7799,
"theoretical_loss": 6.96812561876801,
"tokens_seen": 9568256
},
{
"epoch": 0.0,
"learning_rate": 0.00014583333333333335,
"loss": 5.9063,
"theoretical_loss": 6.95942001984889,
"tokens_seen": 9633792
},
{
"epoch": 0.0,
"learning_rate": 0.00014682539682539685,
"loss": 5.9317,
"theoretical_loss": 6.950789896660304,
"tokens_seen": 9699328
},
{
"epoch": 0.0,
"learning_rate": 0.00014781746031746032,
"loss": 5.8925,
"theoretical_loss": 6.942234091464025,
"tokens_seen": 9764864
},
{
"epoch": 0.0,
"objective/train/docs_used": 17166,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 6.479184627532959,
"objective/train/theoretical_loss": 6.933751471898896,
"objective/train/tokens_used": 30290400,
"theoretical_loss": 6.933751471898896,
"tokens_seen": 9830400
},
{
"epoch": 0.0,
"learning_rate": 0.00014880952380952382,
"loss": 6.2539,
"theoretical_loss": 6.933751471898896,
"tokens_seen": 9830400
},
{
"epoch": 0.0,
"learning_rate": 0.0001498015873015873,
"loss": 5.996,
"theoretical_loss": 6.925340930259813,
"tokens_seen": 9895936
},
{
"epoch": 0.0,
"learning_rate": 0.0001507936507936508,
"loss": 5.8197,
"theoretical_loss": 6.917001382801793,
"tokens_seen": 9961472
},
{
"epoch": 0.0,
"learning_rate": 0.00015178571428571427,
"loss": 6.0534,
"theoretical_loss": 6.908731769068142,
"tokens_seen": 10027008
},
{
"epoch": 0.0,
"learning_rate": 0.0001527777777777778,
"loss": 5.7838,
"theoretical_loss": 6.900531051241684,
"tokens_seen": 10092544
},
{
"epoch": 0.0,
"learning_rate": 0.00015376984126984128,
"loss": 5.7799,
"theoretical_loss": 6.892398213518156,
"tokens_seen": 10158080
},
{
"epoch": 0.0,
"learning_rate": 0.00015476190476190478,
"loss": 6.1056,
"theoretical_loss": 6.884332261500866,
"tokens_seen": 10223616
},
{
"epoch": 0.0,
"learning_rate": 0.00015575396825396825,
"loss": 5.5432,
"theoretical_loss": 6.876332221615746,
"tokens_seen": 10289152
},
{
"epoch": 0.0,
"learning_rate": 0.00015674603174603175,
"loss": 5.8572,
"theoretical_loss": 6.8683971405459925,
"tokens_seen": 10354688
},
{
"epoch": 0.0,
"learning_rate": 0.00015773809523809523,
"loss": 6.226,
"theoretical_loss": 6.860526084685546,
"tokens_seen": 10420224
},
{
"epoch": 0.0,
"learning_rate": 0.00015873015873015873,
"loss": 5.7117,
"theoretical_loss": 6.852718139610625,
"tokens_seen": 10485760
},
{
"epoch": 0.0,
"learning_rate": 0.0001597222222222222,
"loss": 5.6684,
"theoretical_loss": 6.844972409568637,
"tokens_seen": 10551296
},
{
"epoch": 0.0,
"learning_rate": 0.00016071428571428573,
"loss": 5.8166,
"theoretical_loss": 6.8372880169837895,
"tokens_seen": 10616832
},
{
"epoch": 0.0,
"learning_rate": 0.0001617063492063492,
"loss": 5.971,
"theoretical_loss": 6.829664101978745,
"tokens_seen": 10682368
},
{
"epoch": 0.0,
"learning_rate": 0.0001626984126984127,
"loss": 5.7343,
"theoretical_loss": 6.8220998219117135,
"tokens_seen": 10747904
},
{
"epoch": 0.0,
"learning_rate": 0.00016369047619047618,
"loss": 5.8061,
"theoretical_loss": 6.81459435092839,
"tokens_seen": 10813440
},
{
"epoch": 0.0,
"learning_rate": 0.00016468253968253969,
"loss": 6.0029,
"theoretical_loss": 6.807146879528176,
"tokens_seen": 10878976
},
{
"epoch": 0.0,
"learning_rate": 0.00016567460317460316,
"loss": 5.5367,
"theoretical_loss": 6.79975661414414,
"tokens_seen": 10944512
},
{
"epoch": 0.0,
"learning_rate": 0.00016666666666666666,
"loss": 5.6256,
"theoretical_loss": 6.79242277673622,
"tokens_seen": 11010048
},
{
"epoch": 0.0,
"learning_rate": 0.00016765873015873016,
"loss": 5.7868,
"theoretical_loss": 6.785144604397143,
"tokens_seen": 11075584
},
{
"epoch": 0.0,
"learning_rate": 0.00016865079365079366,
"loss": 5.6046,
"theoretical_loss": 6.777921348970633,
"tokens_seen": 11141120
},
{
"epoch": 0.0,
"learning_rate": 0.00016964285714285717,
"loss": 5.9753,
"theoretical_loss": 6.770752276681419,
"tokens_seen": 11206656
},
{
"epoch": 0.0,
"learning_rate": 0.00017063492063492064,
"loss": 5.9022,
"theoretical_loss": 6.76363666777662,
"tokens_seen": 11272192
},
{
"epoch": 0.0,
"learning_rate": 0.00017162698412698414,
"loss": 5.8921,
"theoretical_loss": 6.756573816178118,
"tokens_seen": 11337728
},
{
"epoch": 0.0,
"learning_rate": 0.00017261904761904762,
"loss": 5.9722,
"theoretical_loss": 6.749563029145481,
"tokens_seen": 11403264
},
{
"epoch": 0.0,
"objective/train/docs_used": 18369,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 5.672413349151611,
"objective/train/theoretical_loss": 6.742603626949085,
"objective/train/tokens_used": 31928800,
"theoretical_loss": 6.742603626949085,
"tokens_seen": 11468800
},
{
"epoch": 0.0,
"learning_rate": 0.00017361111111111112,
"loss": 5.4506,
"theoretical_loss": 6.742603626949085,
"tokens_seen": 11468800
},
{
"epoch": 0.0,
"learning_rate": 0.0001746031746031746,
"loss": 6.0078,
"theoretical_loss": 6.735694942553071,
"tokens_seen": 11534336
},
{
"epoch": 0.0,
"learning_rate": 0.0001755952380952381,
"loss": 5.8091,
"theoretical_loss": 6.728836321307757,
"tokens_seen": 11599872
},
{
"epoch": 0.0,
"learning_rate": 0.0001765873015873016,
"loss": 5.6113,
"theoretical_loss": 6.722027120651221,
"tokens_seen": 11665408
},
{
"epoch": 0.0,
"learning_rate": 0.0001775793650793651,
"loss": 5.6758,
"theoretical_loss": 6.715266709819684,
"tokens_seen": 11730944
},
{
"epoch": 0.0,
"learning_rate": 0.00017857142857142857,
"loss": 5.6378,
"theoretical_loss": 6.708554469566408,
"tokens_seen": 11796480
},
{
"epoch": 0.0,
"learning_rate": 0.00017956349206349207,
"loss": 5.7411,
"theoretical_loss": 6.701889791888819,
"tokens_seen": 11862016
},
{
"epoch": 0.0,
"learning_rate": 0.00018055555555555555,
"loss": 5.8311,
"theoretical_loss": 6.695272079763555,
"tokens_seen": 11927552
},
{
"epoch": 0.0,
"learning_rate": 0.00018154761904761905,
"loss": 5.2846,
"theoretical_loss": 6.688700746889175,
"tokens_seen": 11993088
},
{
"epoch": 0.0,
"learning_rate": 0.00018253968253968252,
"loss": 5.7687,
"theoretical_loss": 6.682175217436269,
"tokens_seen": 12058624
},
{
"epoch": 0.0,
"learning_rate": 0.00018353174603174602,
"loss": 5.7687,
"theoretical_loss": 6.675694925804711,
"tokens_seen": 12124160
},
{
"epoch": 0.0,
"learning_rate": 0.00018452380952380953,
"loss": 5.6209,
"theoretical_loss": 6.66925931638781,
"tokens_seen": 12189696
},
{
"epoch": 0.0,
"learning_rate": 0.00018551587301587303,
"loss": 5.9193,
"theoretical_loss": 6.662867843343154,
"tokens_seen": 12255232
},
{
"epoch": 0.0,
"learning_rate": 0.0001865079365079365,
"loss": 5.6754,
"theoretical_loss": 6.6565199703698745,
"tokens_seen": 12320768
},
{
"epoch": 0.0,
"learning_rate": 0.0001875,
"loss": 5.8597,
"theoretical_loss": 6.6502151704921655,
"tokens_seen": 12386304
},
{
"epoch": 0.0,
"learning_rate": 0.0001884920634920635,
"loss": 5.709,
"theoretical_loss": 6.643952925848826,
"tokens_seen": 12451840
},
{
"epoch": 0.0,
"learning_rate": 0.00018948412698412698,
"loss": 5.7054,
"theoretical_loss": 6.637732727488622,
"tokens_seen": 12517376
},
{
"epoch": 0.0,
"learning_rate": 0.00019047619047619048,
"loss": 5.5972,
"theoretical_loss": 6.6315540751713,
"tokens_seen": 12582912
},
{
"epoch": 0.0,
"learning_rate": 0.00019146825396825398,
"loss": 5.5887,
"theoretical_loss": 6.625416477174044,
"tokens_seen": 12648448
},
{
"epoch": 0.0,
"learning_rate": 0.00019246031746031748,
"loss": 5.7082,
"theoretical_loss": 6.619319450103218,
"tokens_seen": 12713984
},
{
"epoch": 0.0,
"learning_rate": 0.00019345238095238096,
"loss": 5.5405,
"theoretical_loss": 6.613262518711206,
"tokens_seen": 12779520
},
{
"epoch": 0.0,
"learning_rate": 0.00019444444444444446,
"loss": 5.5806,
"theoretical_loss": 6.607245215718208,
"tokens_seen": 12845056
},
{
"epoch": 0.0,
"learning_rate": 0.00019543650793650793,
"loss": 5.4612,
"theoretical_loss": 6.601267081638806,
"tokens_seen": 12910592
},
{
"epoch": 0.0,
"learning_rate": 0.00019642857142857144,
"loss": 5.6405,
"theoretical_loss": 6.595327664613176,
"tokens_seen": 12976128
},
{
"epoch": 0.0,
"learning_rate": 0.0001974206349206349,
"loss": 5.3968,
"theoretical_loss": 6.58942652024278,
"tokens_seen": 13041664
},
{
"epoch": 0.0,
"objective/train/docs_used": 19017,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 5.398904323577881,
"objective/train/theoretical_loss": 6.583563211430409,
"objective/train/tokens_used": 33567200,
"theoretical_loss": 6.583563211430409,
"tokens_seen": 13107200
},
{
"epoch": 0.0,
"learning_rate": 0.0001984126984126984,
"loss": 5.5392,
"theoretical_loss": 6.583563211430409,
"tokens_seen": 13107200
},
{
"epoch": 0.0,
"learning_rate": 0.00019940476190476191,
"loss": 5.3995,
"theoretical_loss": 6.577737308224426,
"tokens_seen": 13172736
},
{
"epoch": 0.0,
"learning_rate": 0.00020039682539682542,
"loss": 5.5665,
"theoretical_loss": 6.571948387667094,
"tokens_seen": 13238272
},
{
"epoch": 0.0,
"learning_rate": 0.0002013888888888889,
"loss": 5.5745,
"theoretical_loss": 6.566196033646859,
"tokens_seen": 13303808
},
{
"epoch": 0.0,
"learning_rate": 0.0002023809523809524,
"loss": 5.4756,
"theoretical_loss": 6.560479836754459,
"tokens_seen": 13369344
},
{
"epoch": 0.0,
"learning_rate": 0.00020337301587301587,
"loss": 5.7895,
"theoretical_loss": 6.554799394142741,
"tokens_seen": 13434880
},
{
"epoch": 0.0,
"learning_rate": 0.00020436507936507937,
"loss": 5.3965,
"theoretical_loss": 6.549154309390087,
"tokens_seen": 13500416
},
{
"epoch": 0.0,
"learning_rate": 0.00020535714285714284,
"loss": 5.494,
"theoretical_loss": 6.5435441923673245,
"tokens_seen": 13565952
},
{
"epoch": 0.0,
"learning_rate": 0.00020634920634920634,
"loss": 5.5559,
"theoretical_loss": 6.537968659108021,
"tokens_seen": 13631488
},
{
"epoch": 0.0,
"learning_rate": 0.00020734126984126985,
"loss": 5.9125,
"theoretical_loss": 6.532427331682063,
"tokens_seen": 13697024
},
{
"epoch": 0.0,
"learning_rate": 0.00020833333333333335,
"loss": 5.5235,
"theoretical_loss": 6.5269198380724145,
"tokens_seen": 13762560
},
{
"epoch": 0.0,
"learning_rate": 0.00020932539682539685,
"loss": 5.6307,
"theoretical_loss": 6.5214458120549725,
"tokens_seen": 13828096
},
{
"epoch": 0.0,
"learning_rate": 0.00021031746031746032,
"loss": 5.3661,
"theoretical_loss": 6.516004893081412,
"tokens_seen": 13893632
},
{
"epoch": 0.0,
"learning_rate": 0.00021130952380952382,
"loss": 5.2823,
"theoretical_loss": 6.510596726164945,
"tokens_seen": 13959168
},
{
"epoch": 0.0,
"learning_rate": 0.0002123015873015873,
"loss": 5.4241,
"theoretical_loss": 6.505220961768906,
"tokens_seen": 14024704
},
{
"epoch": 0.0,
"learning_rate": 0.0002132936507936508,
"loss": 5.3679,
"theoretical_loss": 6.499877255698063,
"tokens_seen": 14090240
},
{
"epoch": 0.0,
"learning_rate": 0.00021428571428571427,
"loss": 5.2005,
"theoretical_loss": 6.494565268992613,
"tokens_seen": 14155776
},
{
"epoch": 0.0,
"learning_rate": 0.0002152777777777778,
"loss": 5.6155,
"theoretical_loss": 6.489284667824741,
"tokens_seen": 14221312
},
{
"epoch": 0.0,
"learning_rate": 0.00021626984126984128,
"loss": 5.6027,
"theoretical_loss": 6.484035123397694,
"tokens_seen": 14286848
},
{
"epoch": 0.0,
"learning_rate": 0.00021726190476190478,
"loss": 5.5959,
"theoretical_loss": 6.478816311847305,
"tokens_seen": 14352384
},
{
"epoch": 0.0,
"learning_rate": 0.00021825396825396825,
"loss": 5.7842,
"theoretical_loss": 6.473627914145865,
"tokens_seen": 14417920
},
{
"epoch": 0.0,
"learning_rate": 0.00021924603174603176,
"loss": 5.4174,
"theoretical_loss": 6.468469616008303,
"tokens_seen": 14483456
},
{
"epoch": 0.0,
"learning_rate": 0.00022023809523809523,
"loss": 5.4423,
"theoretical_loss": 6.463341107800611,
"tokens_seen": 14548992
},
{
"epoch": 0.0,
"learning_rate": 0.00022123015873015873,
"loss": 5.2413,
"theoretical_loss": 6.458242084450426,
"tokens_seen": 14614528
},
{
"epoch": 0.0,
"learning_rate": 0.0002222222222222222,
"loss": 5.4841,
"theoretical_loss": 6.453172245359726,
"tokens_seen": 14680064
},
{
"epoch": 0.0,
"objective/train/docs_used": 20187,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 5.4210638999938965,
"objective/train/theoretical_loss": 6.448131294319589,
"objective/train/tokens_used": 35205600,
"theoretical_loss": 6.448131294319589,
"tokens_seen": 14745600
},
{
"epoch": 0.0,
"learning_rate": 0.00022321428571428573,
"loss": 5.318,
"theoretical_loss": 6.448131294319589,
"tokens_seen": 14745600
},
{
"epoch": 0.0,
"learning_rate": 0.0002242063492063492,
"loss": 5.5298,
"theoretical_loss": 6.443118939426935,
"tokens_seen": 14811136
},
{
"epoch": 0.0,
"learning_rate": 0.0002251984126984127,
"loss": 5.5104,
"theoretical_loss": 6.438134893003209,
"tokens_seen": 14876672
},
{
"epoch": 0.0,
"learning_rate": 0.00022619047619047618,
"loss": 5.4089,
"theoretical_loss": 6.43317887151496,
"tokens_seen": 14942208
},
{
"epoch": 0.0,
"learning_rate": 0.00022718253968253969,
"loss": 5.5783,
"theoretical_loss": 6.428250595496234,
"tokens_seen": 15007744
},
{
"epoch": 0.0,
"learning_rate": 0.00022817460317460316,
"loss": 5.4029,
"theoretical_loss": 6.4233497894727805,
"tokens_seen": 15073280
},
{
"epoch": 0.0,
"learning_rate": 0.00022916666666666666,
"loss": 5.6043,
"theoretical_loss": 6.418476181887964,
"tokens_seen": 15138816
},
{
"epoch": 0.0,
"learning_rate": 0.00023015873015873016,
"loss": 5.5123,
"theoretical_loss": 6.413629505030386,
"tokens_seen": 15204352
},
{
"epoch": 0.0,
"learning_rate": 0.00023115079365079367,
"loss": 5.5084,
"theoretical_loss": 6.408809494963135,
"tokens_seen": 15269888
},
{
"epoch": 0.0,
"learning_rate": 0.00023214285714285717,
"loss": 5.269,
"theoretical_loss": 6.404015891454651,
"tokens_seen": 15335424
},
{
"epoch": 0.0,
"learning_rate": 0.00023313492063492064,
"loss": 5.4379,
"theoretical_loss": 6.399248437911128,
"tokens_seen": 15400960
},
{
"epoch": 0.0,
"learning_rate": 0.00023412698412698414,
"loss": 5.4346,
"theoretical_loss": 6.394506881310447,
"tokens_seen": 15466496
},
{
"epoch": 0.0,
"learning_rate": 0.00023511904761904762,
"loss": 5.7191,
"theoretical_loss": 6.38979097213757,
"tokens_seen": 15532032
},
{
"epoch": 0.0,
"learning_rate": 0.00023611111111111112,
"loss": 5.5484,
"theoretical_loss": 6.385100464321383,
"tokens_seen": 15597568
},
{
"epoch": 0.0,
"learning_rate": 0.0002371031746031746,
"loss": 5.4379,
"theoretical_loss": 6.380435115172919,
"tokens_seen": 15663104
},
{
"epoch": 0.0,
"learning_rate": 0.0002380952380952381,
"loss": 5.4785,
"theoretical_loss": 6.375794685324962,
"tokens_seen": 15728640
},
{
"epoch": 0.0,
"learning_rate": 0.0002390873015873016,
"loss": 5.2088,
"theoretical_loss": 6.371178938672962,
"tokens_seen": 15794176
},
{
"epoch": 0.0,
"learning_rate": 0.0002400793650793651,
"loss": 5.5076,
"theoretical_loss": 6.366587642317243,
"tokens_seen": 15859712
},
{
"epoch": 0.0,
"learning_rate": 0.00024107142857142857,
"loss": 5.6351,
"theoretical_loss": 6.362020566506475,
"tokens_seen": 15925248
},
{
"epoch": 0.0,
"learning_rate": 0.00024206349206349207,
"loss": 5.6029,
"theoretical_loss": 6.3574774845823665,
"tokens_seen": 15990784
},
{
"epoch": 0.0,
"learning_rate": 0.00024305555555555555,
"loss": 5.1969,
"theoretical_loss": 6.352958172925549,
"tokens_seen": 16056320
},
{
"epoch": 0.0,
"learning_rate": 0.00024404761904761905,
"loss": 5.285,
"theoretical_loss": 6.348462410902625,
"tokens_seen": 16121856
},
{
"epoch": 0.0,
"learning_rate": 0.00024503968253968255,
"loss": 5.6805,
"theoretical_loss": 6.343989980814362,
"tokens_seen": 16187392
},
{
"epoch": 0.0,
"learning_rate": 0.000246031746031746,
"loss": 5.5855,
"theoretical_loss": 6.339540667844965,
"tokens_seen": 16252928
},
{
"epoch": 0.0,
"learning_rate": 0.00024702380952380955,
"loss": 5.3559,
"theoretical_loss": 6.335114260012455,
"tokens_seen": 16318464
},
{
"epoch": 0.0,
"objective/train/docs_used": 20737,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 5.463630199432373,
"objective/train/theoretical_loss": 6.330710548120079,
"objective/train/tokens_used": 36844000,
"theoretical_loss": 6.330710548120079,
"tokens_seen": 16384000
},
{
"epoch": 0.0,
"learning_rate": 0.000248015873015873,
"loss": 5.515,
"theoretical_loss": 6.330710548120079,
"tokens_seen": 16384000
},
{
"epoch": 0.0,
"learning_rate": 0.0002490079365079365,
"loss": 5.3596,
"theoretical_loss": 6.326329325708746,
"tokens_seen": 16449536
},
{
"epoch": 0.01,
"learning_rate": 0.00025,
"loss": 5.249,
"theoretical_loss": 6.321970389010465,
"tokens_seen": 16515072
},
{
"epoch": 0.01,
"learning_rate": 0.0002509920634920635,
"loss": 5.3675,
"theoretical_loss": 6.317633536902759,
"tokens_seen": 16580608
},
{
"epoch": 0.01,
"learning_rate": 0.000251984126984127,
"loss": 5.0919,
"theoretical_loss": 6.313318570864016,
"tokens_seen": 16646144
},
{
"epoch": 0.01,
"learning_rate": 0.00025297619047619046,
"loss": 5.1228,
"theoretical_loss": 6.30902529492978,
"tokens_seen": 16711680
},
{
"epoch": 0.01,
"learning_rate": 0.00025396825396825396,
"loss": 5.4573,
"theoretical_loss": 6.304753515649935,
"tokens_seen": 16777216
},
{
"epoch": 0.01,
"learning_rate": 0.00025496031746031746,
"loss": 5.1499,
"theoretical_loss": 6.30050304204677,
"tokens_seen": 16842752
},
{
"epoch": 0.01,
"learning_rate": 0.00025595238095238096,
"loss": 5.6432,
"theoretical_loss": 6.296273685573913,
"tokens_seen": 16908288
},
{
"epoch": 0.01,
"learning_rate": 0.0002569444444444444,
"loss": 5.439,
"theoretical_loss": 6.292065260076094,
"tokens_seen": 16973824
},
{
"epoch": 0.01,
"learning_rate": 0.00025793650793650796,
"loss": 5.2959,
"theoretical_loss": 6.287877581749726,
"tokens_seen": 17039360
},
{
"epoch": 0.01,
"learning_rate": 0.00025892857142857146,
"loss": 5.3502,
"theoretical_loss": 6.2837104691042915,
"tokens_seen": 17104896
},
{
"epoch": 0.01,
"learning_rate": 0.00025992063492063497,
"loss": 5.5134,
"theoretical_loss": 6.279563742924502,
"tokens_seen": 17170432
},
{
"epoch": 0.01,
"learning_rate": 0.0002609126984126984,
"loss": 5.4871,
"theoretical_loss": 6.275437226233224,
"tokens_seen": 17235968
},
{
"epoch": 0.01,
"learning_rate": 0.0002619047619047619,
"loss": 5.3132,
"theoretical_loss": 6.271330744255137,
"tokens_seen": 17301504
},
{
"epoch": 0.01,
"learning_rate": 0.0002628968253968254,
"loss": 5.1796,
"theoretical_loss": 6.267244124381133,
"tokens_seen": 17367040
},
{
"epoch": 0.01,
"learning_rate": 0.0002638888888888889,
"loss": 5.2656,
"theoretical_loss": 6.2631771961334035,
"tokens_seen": 17432576
},
{
"epoch": 0.01,
"learning_rate": 0.00026488095238095237,
"loss": 5.5896,
"theoretical_loss": 6.259129791131242,
"tokens_seen": 17498112
},
{
"epoch": 0.01,
"learning_rate": 0.00026587301587301587,
"loss": 5.14,
"theoretical_loss": 6.255101743057493,
"tokens_seen": 17563648
},
{
"epoch": 0.01,
"learning_rate": 0.00026686507936507937,
"loss": 5.3606,
"theoretical_loss": 6.251092887625685,
"tokens_seen": 17629184
},
{
"epoch": 0.01,
"learning_rate": 0.00026785714285714287,
"loss": 5.487,
"theoretical_loss": 6.247103062547796,
"tokens_seen": 17694720
},
{
"epoch": 0.01,
"learning_rate": 0.0002688492063492063,
"loss": 5.2887,
"theoretical_loss": 6.243132107502647,
"tokens_seen": 17760256
},
{
"epoch": 0.01,
"learning_rate": 0.0002698412698412698,
"loss": 5.4254,
"theoretical_loss": 6.239179864104911,
"tokens_seen": 17825792
},
{
"epoch": 0.01,
"learning_rate": 0.0002708333333333333,
"loss": 5.4668,
"theoretical_loss": 6.235246175874727,
"tokens_seen": 17891328
},
{
"epoch": 0.01,
"learning_rate": 0.0002718253968253968,
"loss": 5.3584,
"theoretical_loss": 6.231330888207894,
"tokens_seen": 17956864
},
{
"epoch": 0.01,
"objective/train/docs_used": 21782,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 5.119491100311279,
"objective/train/theoretical_loss": 6.227433848346639,
"objective/train/tokens_used": 38482400,
"theoretical_loss": 6.227433848346639,
"tokens_seen": 18022400
},
{
"epoch": 0.01,
"learning_rate": 0.0002728174603174603,
"loss": 5.1069,
"theoretical_loss": 6.227433848346639,
"tokens_seen": 18022400
},
{
"epoch": 0.01,
"learning_rate": 0.0002738095238095238,
"loss": 5.1568,
"theoretical_loss": 6.22355490535095,
"tokens_seen": 18087936
},
{
"epoch": 0.01,
"learning_rate": 0.0002748015873015873,
"loss": 5.4409,
"theoretical_loss": 6.219693910070452,
"tokens_seen": 18153472
},
{
"epoch": 0.01,
"learning_rate": 0.00027579365079365083,
"loss": 5.1939,
"theoretical_loss": 6.215850715116817,
"tokens_seen": 18219008
},
{
"epoch": 0.01,
"learning_rate": 0.00027678571428571433,
"loss": 5.2274,
"theoretical_loss": 6.212025174836697,
"tokens_seen": 18284544
},
{
"epoch": 0.01,
"learning_rate": 0.0002777777777777778,
"loss": 5.5299,
"theoretical_loss": 6.208217145285173,
"tokens_seen": 18350080
},
{
"epoch": 0.01,
"learning_rate": 0.0002787698412698413,
"loss": 5.4441,
"theoretical_loss": 6.204426484199696,
"tokens_seen": 18415616
},
{
"epoch": 0.01,
"learning_rate": 0.0002797619047619048,
"loss": 5.3135,
"theoretical_loss": 6.200653050974525,
"tokens_seen": 18481152
},
{
"epoch": 0.01,
"learning_rate": 0.0002807539682539683,
"loss": 5.189,
"theoretical_loss": 6.196896706635635,
"tokens_seen": 18546688
},
{
"epoch": 0.01,
"learning_rate": 0.00028174603174603173,
"loss": 5.0786,
"theoretical_loss": 6.1931573138160925,
"tokens_seen": 18612224
},
{
"epoch": 0.01,
"learning_rate": 0.00028273809523809523,
"loss": 5.0434,
"theoretical_loss": 6.189434736731897,
"tokens_seen": 18677760
},
{
"epoch": 0.01,
"learning_rate": 0.00028373015873015873,
"loss": 5.0132,
"theoretical_loss": 6.185728841158257,
"tokens_seen": 18743296
},
{
"epoch": 0.01,
"learning_rate": 0.00028472222222222223,
"loss": 5.1159,
"theoretical_loss": 6.182039494406309,
"tokens_seen": 18808832
},
{
"epoch": 0.01,
"learning_rate": 0.0002857142857142857,
"loss": 5.1786,
"theoretical_loss": 6.178366565300266,
"tokens_seen": 18874368
},
{
"epoch": 0.01,
"learning_rate": 0.0002867063492063492,
"loss": 5.1947,
"theoretical_loss": 6.174709924154978,
"tokens_seen": 18939904
},
{
"epoch": 0.01,
"learning_rate": 0.0002876984126984127,
"loss": 5.2396,
"theoretical_loss": 6.171069442753909,
"tokens_seen": 19005440
},
{
"epoch": 0.01,
"learning_rate": 0.0002886904761904762,
"loss": 5.3947,
"theoretical_loss": 6.1674449943275045,
"tokens_seen": 19070976
},
{
"epoch": 0.01,
"learning_rate": 0.0002896825396825397,
"loss": 4.8732,
"theoretical_loss": 6.16383645353196,
"tokens_seen": 19136512
},
{
"epoch": 0.01,
"learning_rate": 0.0002906746031746032,
"loss": 5.3178,
"theoretical_loss": 6.160243696428367,
"tokens_seen": 19202048
},
{
"epoch": 0.01,
"learning_rate": 0.0002916666666666667,
"loss": 5.1441,
"theoretical_loss": 6.156666600462238,
"tokens_seen": 19267584
},
{
"epoch": 0.01,
"learning_rate": 0.0002926587301587302,
"loss": 5.2695,
"theoretical_loss": 6.153105044443393,
"tokens_seen": 19333120
},
{
"epoch": 0.01,
"learning_rate": 0.0002936507936507937,
"loss": 5.3181,
"theoretical_loss": 6.149558908526206,
"tokens_seen": 19398656
},
{
"epoch": 0.01,
"learning_rate": 0.00029464285714285714,
"loss": 5.2188,
"theoretical_loss": 6.146028074190217,
"tokens_seen": 19464192
},
{
"epoch": 0.01,
"learning_rate": 0.00029563492063492064,
"loss": 5.1652,
"theoretical_loss": 6.14251242422106,
"tokens_seen": 19529728
},
{
"epoch": 0.01,
"learning_rate": 0.00029662698412698414,
"loss": 5.3475,
"theoretical_loss": 6.139011842691756,
"tokens_seen": 19595264
},
{
"epoch": 0.01,
"objective/train/docs_used": 22379,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 5.661308765411377,
"objective/train/theoretical_loss": 6.135526214944321,
"objective/train/tokens_used": 40120800,
"theoretical_loss": 6.135526214944321,
"tokens_seen": 19660800
},
{
"epoch": 0.01,
"learning_rate": 0.00029761904761904765,
"loss": 5.6218,
"theoretical_loss": 6.135526214944321,
"tokens_seen": 19660800
},
{
"epoch": 0.01,
"learning_rate": 0.0002986111111111111,
"loss": 5.4528,
"theoretical_loss": 6.1320554275717,
"tokens_seen": 19726336
},
{
"epoch": 0.01,
"learning_rate": 0.0002996031746031746,
"loss": 5.1971,
"theoretical_loss": 6.128599368400017,
"tokens_seen": 19791872
},
{
"epoch": 0.01,
"learning_rate": 0.0003005952380952381,
"loss": 5.2823,
"theoretical_loss": 6.125157926471134,
"tokens_seen": 19857408
},
{
"epoch": 0.01,
"learning_rate": 0.0003015873015873016,
"loss": 5.1154,
"theoretical_loss": 6.121730992025516,
"tokens_seen": 19922944
},
{
"epoch": 0.01,
"learning_rate": 0.00030257936507936505,
"loss": 4.8293,
"theoretical_loss": 6.118318456485394,
"tokens_seen": 19988480
},
{
"epoch": 0.01,
"learning_rate": 0.00030357142857142855,
"loss": 5.0591,
"theoretical_loss": 6.114920212438209,
"tokens_seen": 20054016
},
{
"epoch": 0.01,
"learning_rate": 0.00030456349206349205,
"loss": 5.2452,
"theoretical_loss": 6.111536153620355,
"tokens_seen": 20119552
},
{
"epoch": 0.01,
"learning_rate": 0.0003055555555555556,
"loss": 5.1158,
"theoretical_loss": 6.108166174901191,
"tokens_seen": 20185088
},
{
"epoch": 0.01,
"learning_rate": 0.00030654761904761905,
"loss": 5.0927,
"theoretical_loss": 6.104810172267331,
"tokens_seen": 20250624
},
{
"epoch": 0.01,
"learning_rate": 0.00030753968253968255,
"loss": 5.1097,
"theoretical_loss": 6.101468042807199,
"tokens_seen": 20316160
},
{
"epoch": 0.01,
"learning_rate": 0.00030853174603174605,
"loss": 5.0109,
"theoretical_loss": 6.098139684695851,
"tokens_seen": 20381696
},
{
"epoch": 0.01,
"learning_rate": 0.00030952380952380956,
"loss": 5.2,
"theoretical_loss": 6.094824997180048,
"tokens_seen": 20447232
},
{
"epoch": 0.01,
"learning_rate": 0.000310515873015873,
"loss": 4.919,
"theoretical_loss": 6.091523880563589,
"tokens_seen": 20512768
},
{
"epoch": 0.01,
"learning_rate": 0.0003115079365079365,
"loss": 4.7726,
"theoretical_loss": 6.088236236192881,
"tokens_seen": 20578304
},
{
"epoch": 0.01,
"learning_rate": 0.0003125,
"loss": 5.1579,
"theoretical_loss": 6.0849619664427586,
"tokens_seen": 20643840
},
{
"epoch": 0.01,
"learning_rate": 0.0003134920634920635,
"loss": 5.2649,
"theoretical_loss": 6.0817009747025415,
"tokens_seen": 20709376
},
{
"epoch": 0.01,
"learning_rate": 0.000314484126984127,
"loss": 4.9338,
"theoretical_loss": 6.07845316536232,
"tokens_seen": 20774912
},
{
"epoch": 0.01,
"learning_rate": 0.00031547619047619046,
"loss": 5.2449,
"theoretical_loss": 6.075218443799468,
"tokens_seen": 20840448
},
{
"epoch": 0.01,
"learning_rate": 0.00031646825396825396,
"loss": 5.2222,
"theoretical_loss": 6.071996716365382,
"tokens_seen": 20905984
},
{
"epoch": 0.01,
"learning_rate": 0.00031746031746031746,
"loss": 4.8122,
"theoretical_loss": 6.068787890372443,
"tokens_seen": 20971520
},
{
"epoch": 0.01,
"learning_rate": 0.00031845238095238096,
"loss": 4.983,
"theoretical_loss": 6.0655918740811865,
"tokens_seen": 21037056
},
{
"epoch": 0.01,
"learning_rate": 0.0003194444444444444,
"loss": 4.9428,
"theoretical_loss": 6.062408576687682,
"tokens_seen": 21102592
},
{
"epoch": 0.01,
"learning_rate": 0.00032043650793650796,
"loss": 5.0493,
"theoretical_loss": 6.059237908311129,
"tokens_seen": 21168128
},
{
"epoch": 0.01,
"learning_rate": 0.00032142857142857147,
"loss": 4.8675,
"theoretical_loss": 6.056079779981644,
"tokens_seen": 21233664
},
{
"epoch": 0.01,
"objective/train/docs_used": 23601,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 5.199455738067627,
"objective/train/theoretical_loss": 6.052934103628253,
"objective/train/tokens_used": 41759200,
"theoretical_loss": 6.052934103628253,
"tokens_seen": 21299200
},
{
"epoch": 0.01,
"learning_rate": 0.00032242063492063497,
"loss": 5.2637,
"theoretical_loss": 6.052934103628253,
"tokens_seen": 21299200
},
{
"epoch": 0.01,
"learning_rate": 0.0003234126984126984,
"loss": 5.4325,
"theoretical_loss": 6.049800792067078,
"tokens_seen": 21364736
},
{
"epoch": 0.01,
"learning_rate": 0.0003244047619047619,
"loss": 4.8274,
"theoretical_loss": 6.046679758989709,
"tokens_seen": 21430272
},
{
"epoch": 0.01,
"learning_rate": 0.0003253968253968254,
"loss": 5.2104,
"theoretical_loss": 6.043570918951775,
"tokens_seen": 21495808
},
{
"epoch": 0.01,
"learning_rate": 0.0003263888888888889,
"loss": 5.1036,
"theoretical_loss": 6.040474187361681,
"tokens_seen": 21561344
},
{
"epoch": 0.01,
"learning_rate": 0.00032738095238095237,
"loss": 4.8811,
"theoretical_loss": 6.037389480469548,
"tokens_seen": 21626880
},
{
"epoch": 0.01,
"learning_rate": 0.00032837301587301587,
"loss": 5.0504,
"theoretical_loss": 6.034316715356304,
"tokens_seen": 21692416
},
{
"epoch": 0.01,
"learning_rate": 0.00032936507936507937,
"loss": 5.0898,
"theoretical_loss": 6.031255809922974,
"tokens_seen": 21757952
},
{
"epoch": 0.01,
"learning_rate": 0.00033035714285714287,
"loss": 5.0121,
"theoretical_loss": 6.0282066828801195,
"tokens_seen": 21823488
},
{
"epoch": 0.01,
"learning_rate": 0.0003313492063492063,
"loss": 5.1363,
"theoretical_loss": 6.0251692537374595,
"tokens_seen": 21889024
},
{
"epoch": 0.01,
"learning_rate": 0.0003323412698412698,
"loss": 5.1503,
"theoretical_loss": 6.022143442793643,
"tokens_seen": 21954560
},
{
"epoch": 0.01,
"learning_rate": 0.0003333333333333333,
"loss": 5.1066,
"theoretical_loss": 6.0191291711261945,
"tokens_seen": 22020096
},
{
"epoch": 0.01,
"learning_rate": 0.0003343253968253968,
"loss": 5.0889,
"theoretical_loss": 6.016126360581609,
"tokens_seen": 22085632
},
{
"epoch": 0.01,
"learning_rate": 0.0003353174603174603,
"loss": 4.9077,
"theoretical_loss": 6.013134933765605,
"tokens_seen": 22151168
},
{
"epoch": 0.01,
"learning_rate": 0.0003363095238095238,
"loss": 5.2813,
"theoretical_loss": 6.010154814033528,
"tokens_seen": 22216704
},
{
"epoch": 0.01,
"learning_rate": 0.00033730158730158733,
"loss": 4.6999,
"theoretical_loss": 6.007185925480902,
"tokens_seen": 22282240
},
{
"epoch": 0.01,
"learning_rate": 0.00033829365079365083,
"loss": 5.0728,
"theoretical_loss": 6.0042281929341375,
"tokens_seen": 22347776
},
{
"epoch": 0.01,
"learning_rate": 0.00033928571428571433,
"loss": 5.1319,
"theoretical_loss": 6.001281541941363,
"tokens_seen": 22413312
},
{
"epoch": 0.01,
"learning_rate": 0.0003402777777777778,
"loss": 5.0915,
"theoretical_loss": 5.998345898763421,
"tokens_seen": 22478848
},
{
"epoch": 0.01,
"learning_rate": 0.0003412698412698413,
"loss": 4.8912,
"theoretical_loss": 5.995421190364983,
"tokens_seen": 22544384
},
{
"epoch": 0.01,
"learning_rate": 0.0003422619047619048,
"loss": 5.0317,
"theoretical_loss": 5.992507344405814,
"tokens_seen": 22609920
},
{
"epoch": 0.01,
"learning_rate": 0.0003432539682539683,
"loss": 5.1603,
"theoretical_loss": 5.9896042892321635,
"tokens_seen": 22675456
},
{
"epoch": 0.01,
"learning_rate": 0.00034424603174603173,
"loss": 4.9822,
"theoretical_loss": 5.986711953868287,
"tokens_seen": 22740992
},
{
"epoch": 0.01,
"learning_rate": 0.00034523809523809523,
"loss": 5.3948,
"theoretical_loss": 5.983830268008107,
"tokens_seen": 22806528
},
{
"epoch": 0.01,
"learning_rate": 0.00034623015873015873,
"loss": 5.222,
"theoretical_loss": 5.980959162006979,
"tokens_seen": 22872064
},
{
"epoch": 0.01,
"objective/train/docs_used": 24859,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 4.439995288848877,
"objective/train/theoretical_loss": 5.978098566873603,
"objective/train/tokens_used": 43397600,
"theoretical_loss": 5.978098566873603,
"tokens_seen": 22937600
},
{
"epoch": 0.01,
"learning_rate": 0.00034722222222222224,
"loss": 4.9666,
"theoretical_loss": 5.978098566873603,
"tokens_seen": 22937600
},
{
"epoch": 0.01,
"learning_rate": 0.0003482142857142857,
"loss": 4.7873,
"theoretical_loss": 5.975248414262053,
"tokens_seen": 23003136
},
{
"epoch": 0.01,
"learning_rate": 0.0003492063492063492,
"loss": 5.1429,
"theoretical_loss": 5.972408636463909,
"tokens_seen": 23068672
},
{
"epoch": 0.01,
"learning_rate": 0.0003501984126984127,
"loss": 4.8387,
"theoretical_loss": 5.969579166400528,
"tokens_seen": 23134208
},
{
"epoch": 0.01,
"learning_rate": 0.0003511904761904762,
"loss": 5.2741,
"theoretical_loss": 5.966759937615427,
"tokens_seen": 23199744
},
{
"epoch": 0.01,
"learning_rate": 0.0003521825396825397,
"loss": 4.9993,
"theoretical_loss": 5.96395088426676,
"tokens_seen": 23265280
},
{
"epoch": 0.01,
"learning_rate": 0.0003531746031746032,
"loss": 4.838,
"theoretical_loss": 5.961151941119932,
"tokens_seen": 23330816
},
{
"epoch": 0.01,
"learning_rate": 0.0003541666666666667,
"loss": 4.8937,
"theoretical_loss": 5.95836304354031,
"tokens_seen": 23396352
},
{
"epoch": 0.01,
"learning_rate": 0.0003551587301587302,
"loss": 5.1755,
"theoretical_loss": 5.9555841274860395,
"tokens_seen": 23461888
},
{
"epoch": 0.01,
"learning_rate": 0.0003561507936507937,
"loss": 5.0192,
"theoretical_loss": 5.952815129500973,
"tokens_seen": 23527424
},
{
"epoch": 0.01,
"learning_rate": 0.00035714285714285714,
"loss": 5.1528,
"theoretical_loss": 5.950055986707699,
"tokens_seen": 23592960
},
{
"epoch": 0.01,
"learning_rate": 0.00035813492063492064,
"loss": 5.2473,
"theoretical_loss": 5.9473066368006755,
"tokens_seen": 23658496
},
{
"epoch": 0.01,
"learning_rate": 0.00035912698412698415,
"loss": 5.1431,
"theoretical_loss": 5.944567018039454,
"tokens_seen": 23724032
},
{
"epoch": 0.01,
"learning_rate": 0.00036011904761904765,
"loss": 5.2078,
"theoretical_loss": 5.941837069242023,
"tokens_seen": 23789568
},
{
"epoch": 0.01,
"learning_rate": 0.0003611111111111111,
"loss": 5.2074,
"theoretical_loss": 5.939116729778224,
"tokens_seen": 23855104
},
{
"epoch": 0.01,
"learning_rate": 0.0003621031746031746,
"loss": 5.045,
"theoretical_loss": 5.936405939563279,
"tokens_seen": 23920640
},
{
"epoch": 0.01,
"learning_rate": 0.0003630952380952381,
"loss": 5.0272,
"theoretical_loss": 5.93370463905141,
"tokens_seen": 23986176
},
{
"epoch": 0.01,
"learning_rate": 0.0003640873015873016,
"loss": 5.0628,
"theoretical_loss": 5.931012769229536,
"tokens_seen": 24051712
},
{
"epoch": 0.01,
"learning_rate": 0.00036507936507936505,
"loss": 4.6127,
"theoretical_loss": 5.928330271611081,
"tokens_seen": 24117248
},
{
"epoch": 0.01,
"learning_rate": 0.00036607142857142855,
"loss": 4.8408,
"theoretical_loss": 5.925657088229862,
"tokens_seen": 24182784
},
{
"epoch": 0.01,
"learning_rate": 0.00036706349206349205,
"loss": 4.9603,
"theoretical_loss": 5.9229931616340545,
"tokens_seen": 24248320
},
{
"epoch": 0.01,
"learning_rate": 0.0003680555555555556,
"loss": 5.0127,
"theoretical_loss": 5.920338434880263,
"tokens_seen": 24313856
},
{
"epoch": 0.01,
"learning_rate": 0.00036904761904761905,
"loss": 5.3699,
"theoretical_loss": 5.9176928515276535,
"tokens_seen": 24379392
},
{
"epoch": 0.01,
"learning_rate": 0.00037003968253968255,
"loss": 4.9974,
"theoretical_loss": 5.915056355632197,
"tokens_seen": 24444928
},
{
"epoch": 0.01,
"learning_rate": 0.00037103174603174606,
"loss": 4.9348,
"theoretical_loss": 5.912428891740967,
"tokens_seen": 24510464
},
{
"epoch": 0.01,
"objective/train/docs_used": 25411,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 4.414747714996338,
"objective/train/theoretical_loss": 5.90981040488653,
"objective/train/tokens_used": 45036000,
"theoretical_loss": 5.90981040488653,
"tokens_seen": 24576000
},
{
"epoch": 0.01,
"learning_rate": 0.00037202380952380956,
"loss": 5.0376,
"theoretical_loss": 5.90981040488653,
"tokens_seen": 24576000
},
{
"epoch": 0.01,
"learning_rate": 0.000373015873015873,
"loss": 5.0724,
"theoretical_loss": 5.907200840581417,
"tokens_seen": 24641536
},
{
"epoch": 0.01,
"learning_rate": 0.0003740079365079365,
"loss": 5.2247,
"theoretical_loss": 5.904600144812672,
"tokens_seen": 24707072
},
{
"epoch": 0.01,
"learning_rate": 0.000375,
"loss": 5.4811,
"theoretical_loss": 5.902008264036468,
"tokens_seen": 24772608
},
{
"epoch": 0.01,
"learning_rate": 0.0003759920634920635,
"loss": 4.9107,
"theoretical_loss": 5.899425145172803,
"tokens_seen": 24838144
},
{
"epoch": 0.01,
"learning_rate": 0.000376984126984127,
"loss": 5.0933,
"theoretical_loss": 5.896850735600281,
"tokens_seen": 24903680
},
{
"epoch": 0.01,
"learning_rate": 0.00037797619047619046,
"loss": 5.1802,
"theoretical_loss": 5.8942849831509445,
"tokens_seen": 24969216
},
{
"epoch": 0.01,
"learning_rate": 0.00037896825396825396,
"loss": 5.2521,
"theoretical_loss": 5.891727836105194,
"tokens_seen": 25034752
},
{
"epoch": 0.01,
"learning_rate": 0.00037996031746031746,
"loss": 4.8607,
"theoretical_loss": 5.889179243186776,
"tokens_seen": 25100288
},
{
"epoch": 0.01,
"learning_rate": 0.00038095238095238096,
"loss": 5.0973,
"theoretical_loss": 5.886639153557828,
"tokens_seen": 25165824
},
{
"epoch": 0.01,
"learning_rate": 0.0003819444444444444,
"loss": 5.3397,
"theoretical_loss": 5.8841075168140105,
"tokens_seen": 25231360
},
{
"epoch": 0.01,
"learning_rate": 0.00038293650793650797,
"loss": 4.7777,
"theoretical_loss": 5.88158428297969,
"tokens_seen": 25296896
},
{
"epoch": 0.01,
"learning_rate": 0.00038392857142857147,
"loss": 4.7219,
"theoretical_loss": 5.879069402503189,
"tokens_seen": 25362432
},
{
"epoch": 0.01,
"learning_rate": 0.00038492063492063497,
"loss": 5.0299,
"theoretical_loss": 5.876562826252119,
"tokens_seen": 25427968
},
{
"epoch": 0.01,
"learning_rate": 0.0003859126984126984,
"loss": 4.8369,
"theoretical_loss": 5.874064505508748,
"tokens_seen": 25493504
},
{
"epoch": 0.01,
"learning_rate": 0.0003869047619047619,
"loss": 5.0017,
"theoretical_loss": 5.871574391965453,
"tokens_seen": 25559040
},
{
"epoch": 0.01,
"learning_rate": 0.0003878968253968254,
"loss": 4.9705,
"theoretical_loss": 5.8690924377202265,
"tokens_seen": 25624576
},
{
"epoch": 0.01,
"learning_rate": 0.0003888888888888889,
"loss": 5.034,
"theoretical_loss": 5.866618595272241,
"tokens_seen": 25690112
},
{
"epoch": 0.01,
"learning_rate": 0.00038988095238095237,
"loss": 4.7181,
"theoretical_loss": 5.864152817517482,
"tokens_seen": 25755648
},
{
"epoch": 0.01,
"learning_rate": 0.00039087301587301587,
"loss": 4.9562,
"theoretical_loss": 5.8616950577444245,
"tokens_seen": 25821184
},
{
"epoch": 0.01,
"learning_rate": 0.00039186507936507937,
"loss": 4.9405,
"theoretical_loss": 5.859245269629783,
"tokens_seen": 25886720
},
{
"epoch": 0.01,
"learning_rate": 0.0003928571428571429,
"loss": 5.099,
"theoretical_loss": 5.85680340723431,
"tokens_seen": 25952256
},
{
"epoch": 0.01,
"learning_rate": 0.0003938492063492063,
"loss": 4.6503,
"theoretical_loss": 5.85436942499865,
"tokens_seen": 26017792
},
{
"epoch": 0.01,
"learning_rate": 0.0003948412698412698,
"loss": 4.8672,
"theoretical_loss": 5.851943277739253,
"tokens_seen": 26083328
},
{
"epoch": 0.01,
"learning_rate": 0.0003958333333333333,
"loss": 5.0201,
"theoretical_loss": 5.84952492064434,
"tokens_seen": 26148864
},
{
"epoch": 0.01,
"objective/train/docs_used": 26668,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 5.337137222290039,
"objective/train/theoretical_loss": 5.847114309269919,
"objective/train/tokens_used": 46674400,
"theoretical_loss": 5.847114309269919,
"tokens_seen": 26214400
},
{
"epoch": 0.01,
"learning_rate": 0.0003968253968253968,
"loss": 5.1138,
"theoretical_loss": 5.847114309269919,
"tokens_seen": 26214400
},
{
"epoch": 0.01,
"learning_rate": 0.0003978174603174603,
"loss": 4.8239,
"theoretical_loss": 5.844711399535855,
"tokens_seen": 26279936
},
{
"epoch": 0.01,
"learning_rate": 0.00039880952380952383,
"loss": 4.7304,
"theoretical_loss": 5.842316147722,
"tokens_seen": 26345472
},
{
"epoch": 0.01,
"learning_rate": 0.00039980158730158733,
"loss": 5.0092,
"theoretical_loss": 5.839928510464356,
"tokens_seen": 26411008
},
{
"epoch": 0.01,
"learning_rate": 0.00040079365079365083,
"loss": 4.4343,
"theoretical_loss": 5.837548444751306,
"tokens_seen": 26476544
},
{
"epoch": 0.01,
"learning_rate": 0.00040178571428571433,
"loss": 4.9309,
"theoretical_loss": 5.835175907919885,
"tokens_seen": 26542080
},
{
"epoch": 0.01,
"learning_rate": 0.0004027777777777778,
"loss": 5.0148,
"theoretical_loss": 5.832810857652097,
"tokens_seen": 26607616
},
{
"epoch": 0.01,
"learning_rate": 0.0004037698412698413,
"loss": 4.9709,
"theoretical_loss": 5.830453251971296,
"tokens_seen": 26673152
},
{
"epoch": 0.01,
"learning_rate": 0.0004047619047619048,
"loss": 5.1225,
"theoretical_loss": 5.8281030492385835,
"tokens_seen": 26738688
},
{
"epoch": 0.01,
"learning_rate": 0.0004057539682539683,
"loss": 4.9439,
"theoretical_loss": 5.82576020814929,
"tokens_seen": 26804224
},
{
"epoch": 0.01,
"learning_rate": 0.00040674603174603173,
"loss": 5.154,
"theoretical_loss": 5.823424687729469,
"tokens_seen": 26869760
},
{
"epoch": 0.01,
"learning_rate": 0.00040773809523809523,
"loss": 4.6484,
"theoretical_loss": 5.821096447332456,
"tokens_seen": 26935296
},
{
"epoch": 0.01,
"learning_rate": 0.00040873015873015874,
"loss": 4.6262,
"theoretical_loss": 5.818775446635469,
"tokens_seen": 27000832
},
{
"epoch": 0.01,
"learning_rate": 0.00040972222222222224,
"loss": 5.0236,
"theoretical_loss": 5.816461645636238,
"tokens_seen": 27066368
},
{
"epoch": 0.01,
"learning_rate": 0.0004107142857142857,
"loss": 5.1234,
"theoretical_loss": 5.814155004649702,
"tokens_seen": 27131904
},
{
"epoch": 0.01,
"learning_rate": 0.0004117063492063492,
"loss": 4.6509,
"theoretical_loss": 5.811855484304724,
"tokens_seen": 27197440
},
{
"epoch": 0.01,
"learning_rate": 0.0004126984126984127,
"loss": 4.9814,
"theoretical_loss": 5.809563045540864,
"tokens_seen": 27262976
},
{
"epoch": 0.01,
"learning_rate": 0.0004136904761904762,
"loss": 5.0187,
"theoretical_loss": 5.8072776496051866,
"tokens_seen": 27328512
},
{
"epoch": 0.01,
"learning_rate": 0.0004146825396825397,
"loss": 4.9255,
"theoretical_loss": 5.804999258049106,
"tokens_seen": 27394048
},
{
"epoch": 0.01,
"learning_rate": 0.0004156746031746032,
"loss": 5.0265,
"theoretical_loss": 5.802727832725283,
"tokens_seen": 27459584
},
{
"epoch": 0.01,
"learning_rate": 0.0004166666666666667,
"loss": 4.8827,
"theoretical_loss": 5.800463335784541,
"tokens_seen": 27525120
},
{
"epoch": 0.01,
"learning_rate": 0.0004176587301587302,
"loss": 5.0962,
"theoretical_loss": 5.798205729672842,
"tokens_seen": 27590656
},
{
"epoch": 0.01,
"learning_rate": 0.0004186507936507937,
"loss": 5.0409,
"theoretical_loss": 5.795954977128286,
"tokens_seen": 27656192
},
{
"epoch": 0.01,
"learning_rate": 0.00041964285714285714,
"loss": 4.8332,
"theoretical_loss": 5.7937110411781525,
"tokens_seen": 27721728
},
{
"epoch": 0.01,
"learning_rate": 0.00042063492063492065,
"loss": 4.813,
"theoretical_loss": 5.79147388513598,
"tokens_seen": 27787264
},
{
"epoch": 0.01,
"objective/train/docs_used": 27295,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 4.60736083984375,
"objective/train/theoretical_loss": 5.789243472598683,
"objective/train/tokens_used": 48312800,
"theoretical_loss": 5.789243472598683,
"tokens_seen": 27852800
},
{
"epoch": 0.01,
"learning_rate": 0.00042162698412698415,
"loss": 4.735,
"theoretical_loss": 5.789243472598683,
"tokens_seen": 27852800
},
{
"epoch": 0.01,
"learning_rate": 0.00042261904761904765,
"loss": 5.0103,
"theoretical_loss": 5.787019767443696,
"tokens_seen": 27918336
},
{
"epoch": 0.01,
"learning_rate": 0.0004236111111111111,
"loss": 4.9487,
"theoretical_loss": 5.784802733826166,
"tokens_seen": 27983872
},
{
"epoch": 0.01,
"learning_rate": 0.0004246031746031746,
"loss": 5.0393,
"theoretical_loss": 5.782592336176171,
"tokens_seen": 28049408
},
{
"epoch": 0.01,
"learning_rate": 0.0004255952380952381,
"loss": 4.8239,
"theoretical_loss": 5.780388539195972,
"tokens_seen": 28114944
},
{
"epoch": 0.01,
"learning_rate": 0.0004265873015873016,
"loss": 4.8301,
"theoretical_loss": 5.778191307857307,
"tokens_seen": 28180480
},
{
"epoch": 0.01,
"learning_rate": 0.00042757936507936505,
"loss": 4.8818,
"theoretical_loss": 5.776000607398707,
"tokens_seen": 28246016
},
{
"epoch": 0.01,
"learning_rate": 0.00042857142857142855,
"loss": 5.0365,
"theoretical_loss": 5.773816403322854,
"tokens_seen": 28311552
},
{
"epoch": 0.01,
"learning_rate": 0.00042956349206349205,
"loss": 4.8806,
"theoretical_loss": 5.7716386613939665,
"tokens_seen": 28377088
},
{
"epoch": 0.01,
"learning_rate": 0.0004305555555555556,
"loss": 4.8212,
"theoretical_loss": 5.769467347635221,
"tokens_seen": 28442624
},
{
"epoch": 0.01,
"learning_rate": 0.00043154761904761905,
"loss": 4.6856,
"theoretical_loss": 5.767302428326196,
"tokens_seen": 28508160
},
{
"epoch": 0.01,
"learning_rate": 0.00043253968253968256,
"loss": 5.1099,
"theoretical_loss": 5.765143870000358,
"tokens_seen": 28573696
},
{
"epoch": 0.01,
"learning_rate": 0.00043353174603174606,
"loss": 4.9942,
"theoretical_loss": 5.762991639442574,
"tokens_seen": 28639232
},
{
"epoch": 0.01,
"learning_rate": 0.00043452380952380956,
"loss": 4.7269,
"theoretical_loss": 5.7608457036866465,
"tokens_seen": 28704768
},
{
"epoch": 0.01,
"learning_rate": 0.000435515873015873,
"loss": 4.7079,
"theoretical_loss": 5.758706030012889,
"tokens_seen": 28770304
},
{
"epoch": 0.01,
"learning_rate": 0.0004365079365079365,
"loss": 5.0535,
"theoretical_loss": 5.756572585945728,
"tokens_seen": 28835840
},
{
"epoch": 0.01,
"learning_rate": 0.0004375,
"loss": 4.9953,
"theoretical_loss": 5.754445339251326,
"tokens_seen": 28901376
},
{
"epoch": 0.01,
"learning_rate": 0.0004384920634920635,
"loss": 4.8239,
"theoretical_loss": 5.752324257935244,
"tokens_seen": 28966912
},
{
"epoch": 0.01,
"learning_rate": 0.000439484126984127,
"loss": 4.8738,
"theoretical_loss": 5.750209310240125,
"tokens_seen": 29032448
},
{
"epoch": 0.01,
"learning_rate": 0.00044047619047619046,
"loss": 4.5774,
"theoretical_loss": 5.7481004646434055,
"tokens_seen": 29097984
},
{
"epoch": 0.01,
"learning_rate": 0.00044146825396825396,
"loss": 4.7821,
"theoretical_loss": 5.745997689855058,
"tokens_seen": 29163520
},
{
"epoch": 0.01,
"learning_rate": 0.00044246031746031746,
"loss": 4.9197,
"theoretical_loss": 5.743900954815356,
"tokens_seen": 29229056
},
{
"epoch": 0.01,
"learning_rate": 0.00044345238095238096,
"loss": 4.9178,
"theoretical_loss": 5.741810228692663,
"tokens_seen": 29294592
},
{
"epoch": 0.01,
"learning_rate": 0.0004444444444444444,
"loss": 4.9811,
"theoretical_loss": 5.739725480881262,
"tokens_seen": 29360128
},
{
"epoch": 0.01,
"learning_rate": 0.00044543650793650797,
"loss": 4.595,
"theoretical_loss": 5.737646680999193,
"tokens_seen": 29425664
},
{
"epoch": 0.01,
"objective/train/docs_used": 28553,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 4.193212032318115,
"objective/train/theoretical_loss": 5.73557379888612,
"objective/train/tokens_used": 49951200,
"theoretical_loss": 5.73557379888612,
"tokens_seen": 29491200
},
{
"epoch": 0.01,
"learning_rate": 0.00044642857142857147,
"loss": 4.6289,
"theoretical_loss": 5.73557379888612,
"tokens_seen": 29491200
},
{
"epoch": 0.01,
"learning_rate": 0.00044742063492063497,
"loss": 4.5175,
"theoretical_loss": 5.733506804601236,
"tokens_seen": 29556736
},
{
"epoch": 0.01,
"learning_rate": 0.0004484126984126984,
"loss": 4.97,
"theoretical_loss": 5.7314456684211725,
"tokens_seen": 29622272
},
{
"epoch": 0.01,
"learning_rate": 0.0004494047619047619,
"loss": 4.9007,
"theoretical_loss": 5.729390360837952,
"tokens_seen": 29687808
},
{
"epoch": 0.01,
"learning_rate": 0.0004503968253968254,
"loss": 5.0718,
"theoretical_loss": 5.7273408525569485,
"tokens_seen": 29753344
},
{
"epoch": 0.01,
"learning_rate": 0.0004513888888888889,
"loss": 4.8076,
"theoretical_loss": 5.725297114494884,
"tokens_seen": 29818880
},
{
"epoch": 0.01,
"learning_rate": 0.00045238095238095237,
"loss": 4.9151,
"theoretical_loss": 5.7232591177778405,
"tokens_seen": 29884416
},
{
"epoch": 0.01,
"learning_rate": 0.00045337301587301587,
"loss": 4.7611,
"theoretical_loss": 5.7212268337393,
"tokens_seen": 29949952
},
{
"epoch": 0.01,
"learning_rate": 0.00045436507936507937,
"loss": 4.6533,
"theoretical_loss": 5.719200233918203,
"tokens_seen": 30015488
},
{
"epoch": 0.01,
"learning_rate": 0.0004553571428571429,
"loss": 4.7689,
"theoretical_loss": 5.717179290057032,
"tokens_seen": 30081024
},
{
"epoch": 0.01,
"learning_rate": 0.0004563492063492063,
"loss": 4.753,
"theoretical_loss": 5.715163974099917,
"tokens_seen": 30146560
},
{
"epoch": 0.01,
"learning_rate": 0.0004573412698412698,
"loss": 4.9783,
"theoretical_loss": 5.713154258190757,
"tokens_seen": 30212096
},
{
"epoch": 0.01,
"learning_rate": 0.0004583333333333333,
"loss": 4.8919,
"theoretical_loss": 5.711150114671375,
"tokens_seen": 30277632
},
{
"epoch": 0.01,
"learning_rate": 0.0004593253968253968,
"loss": 4.6246,
"theoretical_loss": 5.709151516079683,
"tokens_seen": 30343168
},
{
"epoch": 0.01,
"learning_rate": 0.00046031746031746033,
"loss": 4.8047,
"theoretical_loss": 5.707158435147875,
"tokens_seen": 30408704
},
{
"epoch": 0.01,
"learning_rate": 0.00046130952380952383,
"loss": 4.7619,
"theoretical_loss": 5.705170844800628,
"tokens_seen": 30474240
},
{
"epoch": 0.01,
"learning_rate": 0.00046230158730158733,
"loss": 5.0389,
"theoretical_loss": 5.703188718153347,
"tokens_seen": 30539776
},
{
"epoch": 0.01,
"learning_rate": 0.00046329365079365083,
"loss": 4.7978,
"theoretical_loss": 5.7012120285104055,
"tokens_seen": 30605312
},
{
"epoch": 0.01,
"learning_rate": 0.00046428571428571433,
"loss": 4.8182,
"theoretical_loss": 5.699240749363417,
"tokens_seen": 30670848
},
{
"epoch": 0.01,
"learning_rate": 0.0004652777777777778,
"loss": 5.1046,
"theoretical_loss": 5.69727485438953,
"tokens_seen": 30736384
},
{
"epoch": 0.01,
"learning_rate": 0.0004662698412698413,
"loss": 4.9301,
"theoretical_loss": 5.695314317449732,
"tokens_seen": 30801920
},
{
"epoch": 0.01,
"learning_rate": 0.0004672619047619048,
"loss": 4.9513,
"theoretical_loss": 5.69335911258718,
"tokens_seen": 30867456
},
{
"epoch": 0.01,
"learning_rate": 0.0004682539682539683,
"loss": 4.7506,
"theoretical_loss": 5.691409214025544,
"tokens_seen": 30932992
},
{
"epoch": 0.01,
"learning_rate": 0.00046924603174603173,
"loss": 4.9203,
"theoretical_loss": 5.689464596167383,
"tokens_seen": 30998528
},
{
"epoch": 0.01,
"learning_rate": 0.00047023809523809523,
"loss": 4.6588,
"theoretical_loss": 5.687525233592513,
"tokens_seen": 31064064
},
{
"epoch": 0.01,
"objective/train/docs_used": 29210,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 4.980744361877441,
"objective/train/theoretical_loss": 5.685591101056422,
"objective/train/tokens_used": 51589600,
"theoretical_loss": 5.685591101056422,
"tokens_seen": 31129600
},
{
"epoch": 0.01,
"learning_rate": 0.00047123015873015874,
"loss": 4.8131,
"theoretical_loss": 5.685591101056422,
"tokens_seen": 31129600
},
{
"epoch": 0.01,
"learning_rate": 0.00047222222222222224,
"loss": 4.9729,
"theoretical_loss": 5.683662173488678,
"tokens_seen": 31195136
},
{
"epoch": 0.01,
"learning_rate": 0.0004732142857142857,
"loss": 4.9988,
"theoretical_loss": 5.681738425991377,
"tokens_seen": 31260672
},
{
"epoch": 0.01,
"learning_rate": 0.0004742063492063492,
"loss": 4.9296,
"theoretical_loss": 5.679819833837586,
"tokens_seen": 31326208
},
{
"epoch": 0.01,
"learning_rate": 0.0004751984126984127,
"loss": 4.8866,
"theoretical_loss": 5.677906372469826,
"tokens_seen": 31391744
},
{
"epoch": 0.01,
"learning_rate": 0.0004761904761904762,
"loss": 4.8619,
"theoretical_loss": 5.675998017498549,
"tokens_seen": 31457280
},
{
"epoch": 0.01,
"learning_rate": 0.0004771825396825397,
"loss": 4.4881,
"theoretical_loss": 5.674094744700648,
"tokens_seen": 31522816
},
{
"epoch": 0.01,
"learning_rate": 0.0004781746031746032,
"loss": 4.7385,
"theoretical_loss": 5.672196530017979,
"tokens_seen": 31588352
},
{
"epoch": 0.01,
"learning_rate": 0.0004791666666666667,
"loss": 4.9446,
"theoretical_loss": 5.670303349555893,
"tokens_seen": 31653888
},
{
"epoch": 0.01,
"learning_rate": 0.0004801587301587302,
"loss": 4.5805,
"theoretical_loss": 5.668415179581795,
"tokens_seen": 31719424
},
{
"epoch": 0.01,
"learning_rate": 0.0004811507936507937,
"loss": 4.943,
"theoretical_loss": 5.666531996523711,
"tokens_seen": 31784960
},
{
"epoch": 0.01,
"learning_rate": 0.00048214285714285715,
"loss": 4.9994,
"theoretical_loss": 5.664653776968867,
"tokens_seen": 31850496
},
{
"epoch": 0.01,
"learning_rate": 0.00048313492063492065,
"loss": 4.9432,
"theoretical_loss": 5.6627804976622995,
"tokens_seen": 31916032
},
{
"epoch": 0.01,
"learning_rate": 0.00048412698412698415,
"loss": 4.4612,
"theoretical_loss": 5.660912135505461,
"tokens_seen": 31981568
},
{
"epoch": 0.01,
"learning_rate": 0.00048511904761904765,
"loss": 5.0296,
"theoretical_loss": 5.659048667554853,
"tokens_seen": 32047104
},
{
"epoch": 0.01,
"learning_rate": 0.0004861111111111111,
"loss": 4.7342,
"theoretical_loss": 5.657190071020672,
"tokens_seen": 32112640
},
{
"epoch": 0.01,
"learning_rate": 0.0004871031746031746,
"loss": 5.0271,
"theoretical_loss": 5.65533632326547,
"tokens_seen": 32178176
},
{
"epoch": 0.01,
"learning_rate": 0.0004880952380952381,
"loss": 4.5208,
"theoretical_loss": 5.6534874018028205,
"tokens_seen": 32243712
},
{
"epoch": 0.01,
"learning_rate": 0.0004890873015873016,
"loss": 4.8883,
"theoretical_loss": 5.651643284296018,
"tokens_seen": 32309248
},
{
"epoch": 0.01,
"learning_rate": 0.0004900793650793651,
"loss": 4.5918,
"theoretical_loss": 5.6498039485567695,
"tokens_seen": 32374784
},
{
"epoch": 0.01,
"learning_rate": 0.0004910714285714286,
"loss": 4.8362,
"theoretical_loss": 5.6479693725439155,
"tokens_seen": 32440320
},
{
"epoch": 0.01,
"learning_rate": 0.000492063492063492,
"loss": 4.4816,
"theoretical_loss": 5.646139534362161,
"tokens_seen": 32505856
},
{
"epoch": 0.01,
"learning_rate": 0.0004930555555555556,
"loss": 4.7304,
"theoretical_loss": 5.644314412260815,
"tokens_seen": 32571392
},
{
"epoch": 0.01,
"learning_rate": 0.0004940476190476191,
"loss": 4.7639,
"theoretical_loss": 5.642493984632544,
"tokens_seen": 32636928
},
{
"epoch": 0.01,
"learning_rate": 0.0004950396825396826,
"loss": 4.486,
"theoretical_loss": 5.640678230012151,
"tokens_seen": 32702464
},
{
"epoch": 0.01,
"objective/train/docs_used": 30192,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 4.205135822296143,
"objective/train/theoretical_loss": 5.638867127075349,
"objective/train/tokens_used": 53228000,
"theoretical_loss": 5.638867127075349,
"tokens_seen": 32768000
},
{
"epoch": 0.01,
"learning_rate": 0.000496031746031746,
"loss": 4.6383,
"theoretical_loss": 5.638867127075349,
"tokens_seen": 32768000
}
],
"max_steps": 50354,
"num_train_epochs": 9223372036854775807,
"total_flos": 1.6722690048e+16,
"trial_name": null,
"trial_params": null
}