{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.21253175275190517,
"global_step": 502,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0,
"learning_rate": 4.166666666666667e-06,
"loss": 3.0643,
"theoretical_loss": 3.321567680436603,
"tokens_seen": 2990538752
},
{
"epoch": 0.0,
"learning_rate": 8.333333333333334e-06,
"loss": 3.0798,
"theoretical_loss": 3.3215564803546,
"tokens_seen": 2990669824
},
{
"epoch": 0.0,
"learning_rate": 1.25e-05,
"loss": 2.9318,
"theoretical_loss": 3.321545280900887,
"tokens_seen": 2990800896
},
{
"epoch": 0.0,
"learning_rate": 1.6666666666666667e-05,
"loss": 2.8098,
"theoretical_loss": 3.3215340820754022,
"tokens_seen": 2990931968
},
{
"epoch": 0.0,
"learning_rate": 2.0833333333333336e-05,
"loss": 2.7055,
"theoretical_loss": 3.3215228838780817,
"tokens_seen": 2991063040
},
{
"epoch": 0.0,
"learning_rate": 2.5e-05,
"loss": 2.9762,
"theoretical_loss": 3.3215116863088636,
"tokens_seen": 2991194112
},
{
"epoch": 0.0,
"learning_rate": 2.916666666666667e-05,
"loss": 2.8724,
"theoretical_loss": 3.3215004893676854,
"tokens_seen": 2991325184
},
{
"epoch": 0.0,
"learning_rate": 3.3333333333333335e-05,
"loss": 3.0452,
"theoretical_loss": 3.321489293054483,
"tokens_seen": 2991456256
},
{
"epoch": 0.0,
"learning_rate": 3.7500000000000003e-05,
"loss": 2.8676,
"theoretical_loss": 3.321478097369195,
"tokens_seen": 2991587328
},
{
"epoch": 0.0,
"learning_rate": 4.166666666666667e-05,
"loss": 2.8343,
"theoretical_loss": 3.321466902311758,
"tokens_seen": 2991718400
},
{
"epoch": 0.0,
"learning_rate": 4.5833333333333334e-05,
"loss": 2.743,
"theoretical_loss": 3.3214557078821096,
"tokens_seen": 2991849472
},
{
"epoch": 0.01,
"learning_rate": 5e-05,
"loss": 2.5867,
"theoretical_loss": 3.321444514080187,
"tokens_seen": 2991980544
},
{
"epoch": 0.01,
"objective/train/docs_used": 1640856,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.7297048568725586,
"objective/train/theoretical_loss": 3.321438917414603,
"objective/train/tokens_used": 22097376,
"theoretical_loss": 3.321438917414603,
"tokens_seen": 2992046080
},
{
"epoch": 0.01,
"learning_rate": 5.4166666666666664e-05,
"loss": 2.7262,
"theoretical_loss": 3.321433320905927,
"tokens_seen": 2992111616
},
{
"epoch": 0.01,
"learning_rate": 5.833333333333334e-05,
"loss": 2.6517,
"theoretical_loss": 3.3214221283592678,
"tokens_seen": 2992242688
},
{
"epoch": 0.01,
"learning_rate": 6.25e-05,
"loss": 2.9399,
"theoretical_loss": 3.321410936440146,
"tokens_seen": 2992373760
},
{
"epoch": 0.01,
"learning_rate": 6.666666666666667e-05,
"loss": 2.7939,
"theoretical_loss": 3.3213997451485,
"tokens_seen": 2992504832
},
{
"epoch": 0.01,
"learning_rate": 7.083333333333334e-05,
"loss": 2.5715,
"theoretical_loss": 3.3213885544842654,
"tokens_seen": 2992635904
},
{
"epoch": 0.01,
"learning_rate": 7.500000000000001e-05,
"loss": 2.6047,
"theoretical_loss": 3.321377364447381,
"tokens_seen": 2992766976
},
{
"epoch": 0.01,
"learning_rate": 7.916666666666666e-05,
"loss": 2.6736,
"theoretical_loss": 3.3213661750377836,
"tokens_seen": 2992898048
},
{
"epoch": 0.01,
"learning_rate": 8.333333333333334e-05,
"loss": 2.6853,
"theoretical_loss": 3.3213549862554106,
"tokens_seen": 2993029120
},
{
"epoch": 0.01,
"learning_rate": 8.75e-05,
"loss": 2.3665,
"theoretical_loss": 3.3213437981001994,
"tokens_seen": 2993160192
},
{
"epoch": 0.01,
"learning_rate": 9.166666666666667e-05,
"loss": 2.618,
"theoretical_loss": 3.3213326105720875,
"tokens_seen": 2993291264
},
{
"epoch": 0.01,
"learning_rate": 9.583333333333334e-05,
"loss": 2.6614,
"theoretical_loss": 3.3213214236710122,
"tokens_seen": 2993422336
},
{
"epoch": 0.01,
"learning_rate": 0.0001,
"loss": 2.6163,
"theoretical_loss": 3.321310237396911,
"tokens_seen": 2993553408
},
{
"epoch": 0.01,
"objective/train/docs_used": 1641461,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.5642035007476807,
"objective/train/theoretical_loss": 3.3212990517497207,
"objective/train/tokens_used": 23735776,
"theoretical_loss": 3.3212990517497207,
"tokens_seen": 2993684480
},
{
"epoch": 0.01,
"learning_rate": 9.995722840034217e-05,
"loss": 2.7218,
"theoretical_loss": 3.3212990517497207,
"tokens_seen": 2993684480
},
{
"epoch": 0.01,
"learning_rate": 9.991445680068435e-05,
"loss": 2.5137,
"theoretical_loss": 3.3212878667293797,
"tokens_seen": 2993815552
},
{
"epoch": 0.01,
"learning_rate": 9.987168520102653e-05,
"loss": 2.5283,
"theoretical_loss": 3.321276682335825,
"tokens_seen": 2993946624
},
{
"epoch": 0.01,
"learning_rate": 9.98289136013687e-05,
"loss": 2.6367,
"theoretical_loss": 3.3212654985689936,
"tokens_seen": 2994077696
},
{
"epoch": 0.01,
"learning_rate": 9.978614200171087e-05,
"loss": 2.5822,
"theoretical_loss": 3.3212543154288237,
"tokens_seen": 2994208768
},
{
"epoch": 0.01,
"learning_rate": 9.974337040205303e-05,
"loss": 2.6296,
"theoretical_loss": 3.3212431329152525,
"tokens_seen": 2994339840
},
{
"epoch": 0.01,
"learning_rate": 9.970059880239521e-05,
"loss": 2.5596,
"theoretical_loss": 3.321231951028217,
"tokens_seen": 2994470912
},
{
"epoch": 0.01,
"learning_rate": 9.965782720273739e-05,
"loss": 2.5663,
"theoretical_loss": 3.3212207697676552,
"tokens_seen": 2994601984
},
{
"epoch": 0.01,
"learning_rate": 9.961505560307956e-05,
"loss": 2.5138,
"theoretical_loss": 3.3212095891335043,
"tokens_seen": 2994733056
},
{
"epoch": 0.01,
"learning_rate": 9.957228400342173e-05,
"loss": 2.5938,
"theoretical_loss": 3.321198409125702,
"tokens_seen": 2994864128
},
{
"epoch": 0.01,
"learning_rate": 9.95295124037639e-05,
"loss": 2.4583,
"theoretical_loss": 3.321187229744186,
"tokens_seen": 2994995200
},
{
"epoch": 0.02,
"learning_rate": 9.948674080410608e-05,
"loss": 2.5265,
"theoretical_loss": 3.321176050988893,
"tokens_seen": 2995126272
},
{
"epoch": 0.02,
"learning_rate": 9.944396920444825e-05,
"loss": 2.7376,
"theoretical_loss": 3.3211648728597614,
"tokens_seen": 2995257344
},
{
"epoch": 0.02,
"objective/train/docs_used": 1642666,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.7178521156311035,
"objective/train/theoretical_loss": 3.3211592840299864,
"objective/train/tokens_used": 25374176,
"theoretical_loss": 3.3211592840299864,
"tokens_seen": 2995322880
},
{
"epoch": 0.02,
"learning_rate": 9.940119760479042e-05,
"loss": 2.4464,
"theoretical_loss": 3.3211536953567284,
"tokens_seen": 2995388416
},
{
"epoch": 0.02,
"learning_rate": 9.93584260051326e-05,
"loss": 2.657,
"theoretical_loss": 3.321142518479731,
"tokens_seen": 2995519488
},
{
"epoch": 0.02,
"learning_rate": 9.931565440547476e-05,
"loss": 2.5525,
"theoretical_loss": 3.321131342228708,
"tokens_seen": 2995650560
},
{
"epoch": 0.02,
"learning_rate": 9.927288280581694e-05,
"loss": 2.6099,
"theoretical_loss": 3.321120166603596,
"tokens_seen": 2995781632
},
{
"epoch": 0.02,
"learning_rate": 9.923011120615912e-05,
"loss": 2.4712,
"theoretical_loss": 3.3211089916043326,
"tokens_seen": 2995912704
},
{
"epoch": 0.02,
"learning_rate": 9.918733960650128e-05,
"loss": 2.514,
"theoretical_loss": 3.3210978172308554,
"tokens_seen": 2996043776
},
{
"epoch": 0.02,
"learning_rate": 9.914456800684346e-05,
"loss": 2.4037,
"theoretical_loss": 3.3210866434831026,
"tokens_seen": 2996174848
},
{
"epoch": 0.02,
"learning_rate": 9.910179640718563e-05,
"loss": 2.4715,
"theoretical_loss": 3.3210754703610106,
"tokens_seen": 2996305920
},
{
"epoch": 0.02,
"learning_rate": 9.90590248075278e-05,
"loss": 2.4805,
"theoretical_loss": 3.321064297864518,
"tokens_seen": 2996436992
},
{
"epoch": 0.02,
"learning_rate": 9.901625320786998e-05,
"loss": 2.4755,
"theoretical_loss": 3.3210531259935627,
"tokens_seen": 2996568064
},
{
"epoch": 0.02,
"learning_rate": 9.897348160821215e-05,
"loss": 2.5174,
"theoretical_loss": 3.321041954748081,
"tokens_seen": 2996699136
},
{
"epoch": 0.02,
"learning_rate": 9.893071000855433e-05,
"loss": 2.5996,
"theoretical_loss": 3.321030784128012,
"tokens_seen": 2996830208
},
{
"epoch": 0.02,
"objective/train/docs_used": 1643300,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.879695415496826,
"objective/train/theoretical_loss": 3.321019614133292,
"objective/train/tokens_used": 27012576,
"theoretical_loss": 3.321019614133292,
"tokens_seen": 2996961280
},
{
"epoch": 0.02,
"learning_rate": 9.888793840889649e-05,
"loss": 2.5696,
"theoretical_loss": 3.321019614133292,
"tokens_seen": 2996961280
},
{
"epoch": 0.02,
"learning_rate": 9.884516680923867e-05,
"loss": 2.607,
"theoretical_loss": 3.3210084447638595,
"tokens_seen": 2997092352
},
{
"epoch": 0.02,
"learning_rate": 9.880239520958085e-05,
"loss": 2.4604,
"theoretical_loss": 3.320997276019652,
"tokens_seen": 2997223424
},
{
"epoch": 0.02,
"learning_rate": 9.875962360992301e-05,
"loss": 2.4603,
"theoretical_loss": 3.3209861079006067,
"tokens_seen": 2997354496
},
{
"epoch": 0.02,
"learning_rate": 9.871685201026519e-05,
"loss": 2.4374,
"theoretical_loss": 3.320974940406662,
"tokens_seen": 2997485568
},
{
"epoch": 0.02,
"learning_rate": 9.867408041060736e-05,
"loss": 2.5285,
"theoretical_loss": 3.320963773537755,
"tokens_seen": 2997616640
},
{
"epoch": 0.02,
"learning_rate": 9.863130881094953e-05,
"loss": 2.5895,
"theoretical_loss": 3.320952607293824,
"tokens_seen": 2997747712
},
{
"epoch": 0.02,
"learning_rate": 9.858853721129171e-05,
"loss": 2.5835,
"theoretical_loss": 3.320941441674806,
"tokens_seen": 2997878784
},
{
"epoch": 0.02,
"learning_rate": 9.854576561163388e-05,
"loss": 2.7362,
"theoretical_loss": 3.320930276680639,
"tokens_seen": 2998009856
},
{
"epoch": 0.02,
"learning_rate": 9.850299401197606e-05,
"loss": 2.5553,
"theoretical_loss": 3.3209191123112607,
"tokens_seen": 2998140928
},
{
"epoch": 0.03,
"learning_rate": 9.846022241231822e-05,
"loss": 2.6403,
"theoretical_loss": 3.320907948566609,
"tokens_seen": 2998272000
},
{
"epoch": 0.03,
"learning_rate": 9.84174508126604e-05,
"loss": 2.5129,
"theoretical_loss": 3.3208967854466214,
"tokens_seen": 2998403072
},
{
"epoch": 0.03,
"learning_rate": 9.837467921300258e-05,
"loss": 2.4238,
"theoretical_loss": 3.3208856229512356,
"tokens_seen": 2998534144
},
{
"epoch": 0.03,
"objective/train/docs_used": 1644380,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.4041128158569336,
"objective/train/theoretical_loss": 3.320880041937749,
"objective/train/tokens_used": 28650976,
"theoretical_loss": 3.320880041937749,
"tokens_seen": 2998599680
},
{
"epoch": 0.03,
"learning_rate": 9.833190761334474e-05,
"loss": 2.3705,
"theoretical_loss": 3.3208744610803898,
"tokens_seen": 2998665216
},
{
"epoch": 0.03,
"learning_rate": 9.828913601368692e-05,
"loss": 2.4602,
"theoretical_loss": 3.320863299834021,
"tokens_seen": 2998796288
},
{
"epoch": 0.03,
"learning_rate": 9.824636441402908e-05,
"loss": 2.5492,
"theoretical_loss": 3.320852139212068,
"tokens_seen": 2998927360
},
{
"epoch": 0.03,
"learning_rate": 9.820359281437126e-05,
"loss": 2.426,
"theoretical_loss": 3.3208409792144677,
"tokens_seen": 2999058432
},
{
"epoch": 0.03,
"learning_rate": 9.816082121471344e-05,
"loss": 2.4079,
"theoretical_loss": 3.320829819841158,
"tokens_seen": 2999189504
},
{
"epoch": 0.03,
"learning_rate": 9.81180496150556e-05,
"loss": 2.5189,
"theoretical_loss": 3.320818661092077,
"tokens_seen": 2999320576
},
{
"epoch": 0.03,
"learning_rate": 9.807527801539777e-05,
"loss": 2.4085,
"theoretical_loss": 3.3208075029671624,
"tokens_seen": 2999451648
},
{
"epoch": 0.03,
"learning_rate": 9.803250641573995e-05,
"loss": 2.4031,
"theoretical_loss": 3.320796345466352,
"tokens_seen": 2999582720
},
{
"epoch": 0.03,
"learning_rate": 9.798973481608213e-05,
"loss": 2.5067,
"theoretical_loss": 3.320785188589584,
"tokens_seen": 2999713792
},
{
"epoch": 0.03,
"learning_rate": 9.79469632164243e-05,
"loss": 2.4357,
"theoretical_loss": 3.3207740323367956,
"tokens_seen": 2999844864
},
{
"epoch": 0.03,
"learning_rate": 9.790419161676647e-05,
"loss": 2.4929,
"theoretical_loss": 3.3207628767079242,
"tokens_seen": 2999975936
},
{
"epoch": 0.03,
"learning_rate": 9.786142001710863e-05,
"loss": 2.4052,
"theoretical_loss": 3.3207517217029094,
"tokens_seen": 3000107008
},
{
"epoch": 0.03,
"objective/train/docs_used": 1645056,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.3271520137786865,
"objective/train/theoretical_loss": 3.3207405673216877,
"objective/train/tokens_used": 30289376,
"theoretical_loss": 3.3207405673216877,
"tokens_seen": 3000238080
},
{
"epoch": 0.03,
"learning_rate": 9.781864841745081e-05,
"loss": 2.5201,
"theoretical_loss": 3.3207405673216877,
"tokens_seen": 3000238080
},
{
"epoch": 0.03,
"learning_rate": 9.777587681779299e-05,
"loss": 2.5431,
"theoretical_loss": 3.320729413564197,
"tokens_seen": 3000369152
},
{
"epoch": 0.03,
"learning_rate": 9.773310521813517e-05,
"loss": 2.5359,
"theoretical_loss": 3.3207182604303753,
"tokens_seen": 3000500224
},
{
"epoch": 0.03,
"learning_rate": 9.769033361847733e-05,
"loss": 2.1929,
"theoretical_loss": 3.320707107920161,
"tokens_seen": 3000631296
},
{
"epoch": 0.03,
"learning_rate": 9.76475620188195e-05,
"loss": 2.5196,
"theoretical_loss": 3.3206959560334917,
"tokens_seen": 3000762368
},
{
"epoch": 0.03,
"learning_rate": 9.760479041916169e-05,
"loss": 2.3645,
"theoretical_loss": 3.320684804770305,
"tokens_seen": 3000893440
},
{
"epoch": 0.03,
"learning_rate": 9.756201881950386e-05,
"loss": 2.5329,
"theoretical_loss": 3.3206736541305393,
"tokens_seen": 3001024512
},
{
"epoch": 0.03,
"learning_rate": 9.751924721984602e-05,
"loss": 2.533,
"theoretical_loss": 3.3206625041141318,
"tokens_seen": 3001155584
},
{
"epoch": 0.04,
"learning_rate": 9.74764756201882e-05,
"loss": 2.4702,
"theoretical_loss": 3.3206513547210212,
"tokens_seen": 3001286656
},
{
"epoch": 0.04,
"learning_rate": 9.743370402053036e-05,
"loss": 2.6255,
"theoretical_loss": 3.320640205951145,
"tokens_seen": 3001417728
},
{
"epoch": 0.04,
"learning_rate": 9.739093242087256e-05,
"loss": 2.5139,
"theoretical_loss": 3.3206290578044415,
"tokens_seen": 3001548800
},
{
"epoch": 0.04,
"learning_rate": 9.734816082121472e-05,
"loss": 2.3796,
"theoretical_loss": 3.3206179102808484,
"tokens_seen": 3001679872
},
{
"epoch": 0.04,
"learning_rate": 9.730538922155689e-05,
"loss": 2.4997,
"theoretical_loss": 3.3206067633803036,
"tokens_seen": 3001810944
},
{
"epoch": 0.04,
"objective/train/docs_used": 1646327,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.398160219192505,
"objective/train/theoretical_loss": 3.320601190163655,
"objective/train/tokens_used": 31927776,
"theoretical_loss": 3.320601190163655,
"tokens_seen": 3001876480
},
{
"epoch": 0.04,
"learning_rate": 9.726261762189906e-05,
"loss": 2.3607,
"theoretical_loss": 3.320595617102745,
"tokens_seen": 3001942016
},
{
"epoch": 0.04,
"learning_rate": 9.721984602224123e-05,
"loss": 2.4803,
"theoretical_loss": 3.320584471448111,
"tokens_seen": 3002073088
},
{
"epoch": 0.04,
"learning_rate": 9.717707442258342e-05,
"loss": 2.348,
"theoretical_loss": 3.3205733264163393,
"tokens_seen": 3002204160
},
{
"epoch": 0.04,
"learning_rate": 9.713430282292558e-05,
"loss": 2.2828,
"theoretical_loss": 3.320562182007368,
"tokens_seen": 3002335232
},
{
"epoch": 0.04,
"learning_rate": 9.709153122326775e-05,
"loss": 2.4615,
"theoretical_loss": 3.320551038221135,
"tokens_seen": 3002466304
},
{
"epoch": 0.04,
"learning_rate": 9.704875962360993e-05,
"loss": 2.3988,
"theoretical_loss": 3.3205398950575784,
"tokens_seen": 3002597376
},
{
"epoch": 0.04,
"learning_rate": 9.700598802395209e-05,
"loss": 2.5582,
"theoretical_loss": 3.320528752516636,
"tokens_seen": 3002728448
},
{
"epoch": 0.04,
"learning_rate": 9.696321642429428e-05,
"loss": 2.3266,
"theoretical_loss": 3.3205176105982463,
"tokens_seen": 3002859520
},
{
"epoch": 0.04,
"learning_rate": 9.692044482463645e-05,
"loss": 2.5922,
"theoretical_loss": 3.320506469302347,
"tokens_seen": 3002990592
},
{
"epoch": 0.04,
"learning_rate": 9.687767322497861e-05,
"loss": 2.4959,
"theoretical_loss": 3.3204953286288763,
"tokens_seen": 3003121664
},
{
"epoch": 0.04,
"learning_rate": 9.683490162532079e-05,
"loss": 2.6068,
"theoretical_loss": 3.3204841885777725,
"tokens_seen": 3003252736
},
{
"epoch": 0.04,
"learning_rate": 9.679213002566297e-05,
"loss": 2.3996,
"theoretical_loss": 3.3204730491489727,
"tokens_seen": 3003383808
},
{
"epoch": 0.04,
"objective/train/docs_used": 1647543,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.3885486125946045,
"objective/train/theoretical_loss": 3.3204619103424164,
"objective/train/tokens_used": 33566176,
"theoretical_loss": 3.3204619103424164,
"tokens_seen": 3003514880
},
{
"epoch": 0.04,
"learning_rate": 9.674935842600514e-05,
"loss": 2.4692,
"theoretical_loss": 3.3204619103424164,
"tokens_seen": 3003514880
},
{
"epoch": 0.04,
"learning_rate": 9.670658682634731e-05,
"loss": 2.35,
"theoretical_loss": 3.3204507721580403,
"tokens_seen": 3003645952
},
{
"epoch": 0.04,
"learning_rate": 9.666381522668948e-05,
"loss": 2.3168,
"theoretical_loss": 3.3204396345957834,
"tokens_seen": 3003777024
},
{
"epoch": 0.04,
"learning_rate": 9.662104362703166e-05,
"loss": 2.4671,
"theoretical_loss": 3.320428497655584,
"tokens_seen": 3003908096
},
{
"epoch": 0.04,
"learning_rate": 9.657827202737383e-05,
"loss": 2.552,
"theoretical_loss": 3.320417361337379,
"tokens_seen": 3004039168
},
{
"epoch": 0.04,
"learning_rate": 9.6535500427716e-05,
"loss": 2.3902,
"theoretical_loss": 3.3204062256411078,
"tokens_seen": 3004170240
},
{
"epoch": 0.04,
"learning_rate": 9.649272882805818e-05,
"loss": 2.491,
"theoretical_loss": 3.320395090566708,
"tokens_seen": 3004301312
},
{
"epoch": 0.05,
"learning_rate": 9.644995722840034e-05,
"loss": 2.4628,
"theoretical_loss": 3.3203839561141173,
"tokens_seen": 3004432384
},
{
"epoch": 0.05,
"learning_rate": 9.640718562874252e-05,
"loss": 2.4858,
"theoretical_loss": 3.320372822283275,
"tokens_seen": 3004563456
},
{
"epoch": 0.05,
"learning_rate": 9.63644140290847e-05,
"loss": 2.5038,
"theoretical_loss": 3.3203616890741183,
"tokens_seen": 3004694528
},
{
"epoch": 0.05,
"learning_rate": 9.632164242942686e-05,
"loss": 2.2651,
"theoretical_loss": 3.3203505564865856,
"tokens_seen": 3004825600
},
{
"epoch": 0.05,
"learning_rate": 9.627887082976904e-05,
"loss": 2.4591,
"theoretical_loss": 3.3203394245206153,
"tokens_seen": 3004956672
},
{
"epoch": 0.05,
"learning_rate": 9.623609923011121e-05,
"loss": 2.4003,
"theoretical_loss": 3.320328293176145,
"tokens_seen": 3005087744
},
{
"epoch": 0.05,
"objective/train/docs_used": 1648109,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.3964030742645264,
"objective/train/theoretical_loss": 3.3203227277369534,
"objective/train/tokens_used": 35204576,
"theoretical_loss": 3.3203227277369534,
"tokens_seen": 3005153280
},
{
"epoch": 0.05,
"learning_rate": 9.619332763045337e-05,
"loss": 2.3952,
"theoretical_loss": 3.320317162453114,
"tokens_seen": 3005218816
},
{
"epoch": 0.05,
"learning_rate": 9.615055603079556e-05,
"loss": 2.4367,
"theoretical_loss": 3.3203060323514593,
"tokens_seen": 3005349888
},
{
"epoch": 0.05,
"learning_rate": 9.610778443113773e-05,
"loss": 2.4468,
"theoretical_loss": 3.3202949028711197,
"tokens_seen": 3005480960
},
{
"epoch": 0.05,
"learning_rate": 9.60650128314799e-05,
"loss": 2.3551,
"theoretical_loss": 3.3202837740120335,
"tokens_seen": 3005612032
},
{
"epoch": 0.05,
"learning_rate": 9.602224123182207e-05,
"loss": 2.3886,
"theoretical_loss": 3.3202726457741387,
"tokens_seen": 3005743104
},
{
"epoch": 0.05,
"learning_rate": 9.597946963216424e-05,
"loss": 2.4953,
"theoretical_loss": 3.320261518157374,
"tokens_seen": 3005874176
},
{
"epoch": 0.05,
"learning_rate": 9.593669803250643e-05,
"loss": 2.3074,
"theoretical_loss": 3.3202503911616765,
"tokens_seen": 3006005248
},
{
"epoch": 0.05,
"learning_rate": 9.589392643284859e-05,
"loss": 2.4135,
"theoretical_loss": 3.320239264786986,
"tokens_seen": 3006136320
},
{
"epoch": 0.05,
"learning_rate": 9.585115483319077e-05,
"loss": 2.431,
"theoretical_loss": 3.3202281390332393,
"tokens_seen": 3006267392
},
{
"epoch": 0.05,
"learning_rate": 9.580838323353294e-05,
"loss": 2.3277,
"theoretical_loss": 3.320217013900376,
"tokens_seen": 3006398464
},
{
"epoch": 0.05,
"learning_rate": 9.576561163387511e-05,
"loss": 2.5083,
"theoretical_loss": 3.3202058893883333,
"tokens_seen": 3006529536
},
{
"epoch": 0.05,
"learning_rate": 9.572284003421729e-05,
"loss": 2.4582,
"theoretical_loss": 3.3201947654970505,
"tokens_seen": 3006660608
},
{
"epoch": 0.05,
"objective/train/docs_used": 1649212,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.5212416648864746,
"objective/train/theoretical_loss": 3.320183642226465,
"objective/train/tokens_used": 36842976,
"theoretical_loss": 3.320183642226465,
"tokens_seen": 3006791680
},
{
"epoch": 0.05,
"learning_rate": 9.568006843455946e-05,
"loss": 2.3596,
"theoretical_loss": 3.320183642226465,
"tokens_seen": 3006791680
},
{
"epoch": 0.05,
"learning_rate": 9.563729683490164e-05,
"loss": 2.3923,
"theoretical_loss": 3.3201725195765155,
"tokens_seen": 3006922752
},
{
"epoch": 0.05,
"learning_rate": 9.55945252352438e-05,
"loss": 2.3563,
"theoretical_loss": 3.3201613975471402,
"tokens_seen": 3007053824
},
{
"epoch": 0.05,
"learning_rate": 9.555175363558598e-05,
"loss": 2.4285,
"theoretical_loss": 3.3201502761382775,
"tokens_seen": 3007184896
},
{
"epoch": 0.05,
"learning_rate": 9.550898203592816e-05,
"loss": 2.2928,
"theoretical_loss": 3.320139155349866,
"tokens_seen": 3007315968
},
{
"epoch": 0.06,
"learning_rate": 9.546621043627032e-05,
"loss": 2.4617,
"theoretical_loss": 3.3201280351818436,
"tokens_seen": 3007447040
},
{
"epoch": 0.06,
"learning_rate": 9.542343883661249e-05,
"loss": 2.4107,
"theoretical_loss": 3.320116915634149,
"tokens_seen": 3007578112
},
{
"epoch": 0.06,
"learning_rate": 9.538066723695466e-05,
"loss": 2.5715,
"theoretical_loss": 3.3201057967067205,
"tokens_seen": 3007709184
},
{
"epoch": 0.06,
"learning_rate": 9.533789563729684e-05,
"loss": 2.4217,
"theoretical_loss": 3.3200946783994962,
"tokens_seen": 3007840256
},
{
"epoch": 0.06,
"learning_rate": 9.529512403763902e-05,
"loss": 2.4315,
"theoretical_loss": 3.3200835607124146,
"tokens_seen": 3007971328
},
{
"epoch": 0.06,
"learning_rate": 9.525235243798119e-05,
"loss": 2.4307,
"theoretical_loss": 3.3200724436454143,
"tokens_seen": 3008102400
},
{
"epoch": 0.06,
"learning_rate": 9.520958083832335e-05,
"loss": 2.5032,
"theoretical_loss": 3.3200613271984336,
"tokens_seen": 3008233472
},
{
"epoch": 0.06,
"learning_rate": 9.516680923866553e-05,
"loss": 2.4379,
"theoretical_loss": 3.3200502113714108,
"tokens_seen": 3008364544
},
{
"epoch": 0.06,
"objective/train/docs_used": 1649940,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.399945020675659,
"objective/train/theoretical_loss": 3.3200446536903643,
"objective/train/tokens_used": 38481376,
"theoretical_loss": 3.3200446536903643,
"tokens_seen": 3008430080
},
{
"epoch": 0.06,
"learning_rate": 9.512403763900771e-05,
"loss": 2.5107,
"theoretical_loss": 3.3200390961642845,
"tokens_seen": 3008495616
},
{
"epoch": 0.06,
"learning_rate": 9.508126603934989e-05,
"loss": 2.4659,
"theoretical_loss": 3.3200279815769926,
"tokens_seen": 3008626688
},
{
"epoch": 0.06,
"learning_rate": 9.503849443969205e-05,
"loss": 2.4327,
"theoretical_loss": 3.3200168676094743,
"tokens_seen": 3008757760
},
{
"epoch": 0.06,
"learning_rate": 9.499572284003421e-05,
"loss": 2.2681,
"theoretical_loss": 3.320005754261668,
"tokens_seen": 3008888832
},
{
"epoch": 0.06,
"learning_rate": 9.49529512403764e-05,
"loss": 2.3802,
"theoretical_loss": 3.319994641533511,
"tokens_seen": 3009019904
},
{
"epoch": 0.06,
"learning_rate": 9.491017964071857e-05,
"loss": 2.5461,
"theoretical_loss": 3.319983529424943,
"tokens_seen": 3009150976
},
{
"epoch": 0.06,
"learning_rate": 9.486740804106075e-05,
"loss": 2.4021,
"theoretical_loss": 3.3199724179359027,
"tokens_seen": 3009282048
},
{
"epoch": 0.06,
"learning_rate": 9.482463644140291e-05,
"loss": 2.4038,
"theoretical_loss": 3.319961307066327,
"tokens_seen": 3009413120
},
{
"epoch": 0.06,
"learning_rate": 9.478186484174508e-05,
"loss": 2.4687,
"theoretical_loss": 3.3199501968161558,
"tokens_seen": 3009544192
},
{
"epoch": 0.06,
"learning_rate": 9.473909324208726e-05,
"loss": 2.4401,
"theoretical_loss": 3.319939087185327,
"tokens_seen": 3009675264
},
{
"epoch": 0.06,
"learning_rate": 9.469632164242944e-05,
"loss": 2.4778,
"theoretical_loss": 3.3199279781737796,
"tokens_seen": 3009806336
},
{
"epoch": 0.06,
"learning_rate": 9.46535500427716e-05,
"loss": 2.4166,
"theoretical_loss": 3.3199168697814514,
"tokens_seen": 3009937408
},
{
"epoch": 0.06,
"objective/train/docs_used": 1651249,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.412529230117798,
"objective/train/theoretical_loss": 3.3199057620082812,
"objective/train/tokens_used": 40119776,
"theoretical_loss": 3.3199057620082812,
"tokens_seen": 3010068480
},
{
"epoch": 0.06,
"learning_rate": 9.461077844311378e-05,
"loss": 2.3839,
"theoretical_loss": 3.3199057620082812,
"tokens_seen": 3010068480
},
{
"epoch": 0.06,
"learning_rate": 9.456800684345594e-05,
"loss": 2.5879,
"theoretical_loss": 3.319894654854208,
"tokens_seen": 3010199552
},
{
"epoch": 0.06,
"learning_rate": 9.452523524379812e-05,
"loss": 2.444,
"theoretical_loss": 3.3198835483191695,
"tokens_seen": 3010330624
},
{
"epoch": 0.06,
"learning_rate": 9.44824636441403e-05,
"loss": 2.2849,
"theoretical_loss": 3.319872442403105,
"tokens_seen": 3010461696
},
{
"epoch": 0.07,
"learning_rate": 9.443969204448247e-05,
"loss": 2.568,
"theoretical_loss": 3.3198613371059524,
"tokens_seen": 3010592768
},
{
"epoch": 0.07,
"learning_rate": 9.439692044482464e-05,
"loss": 2.4131,
"theoretical_loss": 3.319850232427651,
"tokens_seen": 3010723840
},
{
"epoch": 0.07,
"learning_rate": 9.435414884516681e-05,
"loss": 2.4222,
"theoretical_loss": 3.3198391283681383,
"tokens_seen": 3010854912
},
{
"epoch": 0.07,
"learning_rate": 9.431137724550899e-05,
"loss": 2.3993,
"theoretical_loss": 3.3198280249273546,
"tokens_seen": 3010985984
},
{
"epoch": 0.07,
"learning_rate": 9.426860564585116e-05,
"loss": 2.5294,
"theoretical_loss": 3.319816922105237,
"tokens_seen": 3011117056
},
{
"epoch": 0.07,
"learning_rate": 9.422583404619333e-05,
"loss": 2.4529,
"theoretical_loss": 3.319805819901724,
"tokens_seen": 3011248128
},
{
"epoch": 0.07,
"learning_rate": 9.418306244653551e-05,
"loss": 2.4924,
"theoretical_loss": 3.3197947183167553,
"tokens_seen": 3011379200
},
{
"epoch": 0.07,
"learning_rate": 9.414029084687767e-05,
"loss": 2.6054,
"theoretical_loss": 3.319783617350269,
"tokens_seen": 3011510272
},
{
"epoch": 0.07,
"learning_rate": 9.409751924721985e-05,
"loss": 2.388,
"theoretical_loss": 3.319772517002204,
"tokens_seen": 3011641344
},
{
"epoch": 0.07,
"objective/train/docs_used": 1651905,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.8164331912994385,
"objective/train/theoretical_loss": 3.31976696706006,
"objective/train/tokens_used": 41758176,
"theoretical_loss": 3.31976696706006,
"tokens_seen": 3011706880
},
{
"epoch": 0.07,
"learning_rate": 9.405474764756203e-05,
"loss": 2.4623,
"theoretical_loss": 3.319761417272498,
"tokens_seen": 3011772416
},
{
"epoch": 0.07,
"learning_rate": 9.40119760479042e-05,
"loss": 2.3955,
"theoretical_loss": 3.319750318161091,
"tokens_seen": 3011903488
},
{
"epoch": 0.07,
"learning_rate": 9.396920444824637e-05,
"loss": 2.2424,
"theoretical_loss": 3.3197392196679205,
"tokens_seen": 3012034560
},
{
"epoch": 0.07,
"learning_rate": 9.392643284858854e-05,
"loss": 2.3568,
"theoretical_loss": 3.3197281217929255,
"tokens_seen": 3012165632
},
{
"epoch": 0.07,
"learning_rate": 9.388366124893072e-05,
"loss": 2.3788,
"theoretical_loss": 3.319717024536045,
"tokens_seen": 3012296704
},
{
"epoch": 0.07,
"learning_rate": 9.38408896492729e-05,
"loss": 2.3081,
"theoretical_loss": 3.3197059278972176,
"tokens_seen": 3012427776
},
{
"epoch": 0.07,
"learning_rate": 9.379811804961506e-05,
"loss": 2.4277,
"theoretical_loss": 3.3196948318763817,
"tokens_seen": 3012558848
},
{
"epoch": 0.07,
"learning_rate": 9.375534644995724e-05,
"loss": 2.4135,
"theoretical_loss": 3.319683736473476,
"tokens_seen": 3012689920
},
{
"epoch": 0.07,
"learning_rate": 9.37125748502994e-05,
"loss": 2.3474,
"theoretical_loss": 3.3196726416884395,
"tokens_seen": 3012820992
},
{
"epoch": 0.07,
"learning_rate": 9.366980325064158e-05,
"loss": 2.5489,
"theoretical_loss": 3.3196615475212106,
"tokens_seen": 3012952064
},
{
"epoch": 0.07,
"learning_rate": 9.362703165098376e-05,
"loss": 2.4998,
"theoretical_loss": 3.3196504539717284,
"tokens_seen": 3013083136
},
{
"epoch": 0.07,
"learning_rate": 9.358426005132592e-05,
"loss": 2.5438,
"theoretical_loss": 3.3196393610399317,
"tokens_seen": 3013214208
},
{
"epoch": 0.07,
"objective/train/docs_used": 1652881,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.3386740684509277,
"objective/train/theoretical_loss": 3.3196282687257583,
"objective/train/tokens_used": 43396576,
"theoretical_loss": 3.3196282687257583,
"tokens_seen": 3013345280
},
{
"epoch": 0.07,
"learning_rate": 9.35414884516681e-05,
"loss": 2.5351,
"theoretical_loss": 3.3196282687257583,
"tokens_seen": 3013345280
},
{
"epoch": 0.07,
"learning_rate": 9.349871685201027e-05,
"loss": 2.3127,
"theoretical_loss": 3.3196171770291483,
"tokens_seen": 3013476352
},
{
"epoch": 0.07,
"learning_rate": 9.345594525235244e-05,
"loss": 2.435,
"theoretical_loss": 3.3196060859500394,
"tokens_seen": 3013607424
},
{
"epoch": 0.08,
"learning_rate": 9.341317365269462e-05,
"loss": 2.3466,
"theoretical_loss": 3.319594995488371,
"tokens_seen": 3013738496
},
{
"epoch": 0.08,
"learning_rate": 9.337040205303679e-05,
"loss": 2.5683,
"theoretical_loss": 3.3195839056440812,
"tokens_seen": 3013869568
},
{
"epoch": 0.08,
"learning_rate": 9.332763045337895e-05,
"loss": 2.4838,
"theoretical_loss": 3.3195728164171094,
"tokens_seen": 3014000640
},
{
"epoch": 0.08,
"learning_rate": 9.328485885372113e-05,
"loss": 2.5372,
"theoretical_loss": 3.319561727807394,
"tokens_seen": 3014131712
},
{
"epoch": 0.08,
"learning_rate": 9.324208725406331e-05,
"loss": 2.4055,
"theoretical_loss": 3.3195506398148744,
"tokens_seen": 3014262784
},
{
"epoch": 0.08,
"learning_rate": 9.319931565440549e-05,
"loss": 2.5566,
"theoretical_loss": 3.319539552439489,
"tokens_seen": 3014393856
},
{
"epoch": 0.08,
"learning_rate": 9.315654405474765e-05,
"loss": 2.4051,
"theoretical_loss": 3.3195284656811763,
"tokens_seen": 3014524928
},
{
"epoch": 0.08,
"learning_rate": 9.311377245508982e-05,
"loss": 2.5032,
"theoretical_loss": 3.319517379539876,
"tokens_seen": 3014656000
},
{
"epoch": 0.08,
"learning_rate": 9.3071000855432e-05,
"loss": 2.427,
"theoretical_loss": 3.3195062940155258,
"tokens_seen": 3014787072
},
{
"epoch": 0.08,
"learning_rate": 9.302822925577417e-05,
"loss": 2.4468,
"theoretical_loss": 3.3194952091080654,
"tokens_seen": 3014918144
},
{
"epoch": 0.08,
"objective/train/docs_used": 1653310,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.2789435386657715,
"objective/train/theoretical_loss": 3.3194896668856497,
"objective/train/tokens_used": 45034976,
"theoretical_loss": 3.3194896668856497,
"tokens_seen": 3014983680
},
{
"epoch": 0.08,
"learning_rate": 9.298545765611635e-05,
"loss": 2.3447,
"theoretical_loss": 3.3194841248174334,
"tokens_seen": 3015049216
},
{
"epoch": 0.08,
"learning_rate": 9.294268605645852e-05,
"loss": 2.3587,
"theoretical_loss": 3.3194730411435684,
"tokens_seen": 3015180288
},
{
"epoch": 0.08,
"learning_rate": 9.289991445680068e-05,
"loss": 2.6264,
"theoretical_loss": 3.3194619580864098,
"tokens_seen": 3015311360
},
{
"epoch": 0.08,
"learning_rate": 9.285714285714286e-05,
"loss": 2.57,
"theoretical_loss": 3.3194508756458965,
"tokens_seen": 3015442432
},
{
"epoch": 0.08,
"learning_rate": 9.281437125748504e-05,
"loss": 2.3972,
"theoretical_loss": 3.319439793821967,
"tokens_seen": 3015573504
},
{
"epoch": 0.08,
"learning_rate": 9.27715996578272e-05,
"loss": 2.4522,
"theoretical_loss": 3.3194287126145596,
"tokens_seen": 3015704576
},
{
"epoch": 0.08,
"learning_rate": 9.272882805816938e-05,
"loss": 2.4546,
"theoretical_loss": 3.3194176320236144,
"tokens_seen": 3015835648
},
{
"epoch": 0.08,
"learning_rate": 9.268605645851154e-05,
"loss": 2.6088,
"theoretical_loss": 3.31940655204907,
"tokens_seen": 3015966720
},
{
"epoch": 0.08,
"learning_rate": 9.264328485885372e-05,
"loss": 2.4454,
"theoretical_loss": 3.319395472690865,
"tokens_seen": 3016097792
},
{
"epoch": 0.08,
"learning_rate": 9.26005132591959e-05,
"loss": 2.3876,
"theoretical_loss": 3.3193843939489382,
"tokens_seen": 3016228864
},
{
"epoch": 0.08,
"learning_rate": 9.255774165953807e-05,
"loss": 2.4971,
"theoretical_loss": 3.319373315823229,
"tokens_seen": 3016359936
},
{
"epoch": 0.08,
"learning_rate": 9.251497005988024e-05,
"loss": 2.5668,
"theoretical_loss": 3.3193622383136763,
"tokens_seen": 3016491008
},
{
"epoch": 0.08,
"objective/train/docs_used": 1654644,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.148563861846924,
"objective/train/theoretical_loss": 3.3193511614202187,
"objective/train/tokens_used": 46673376,
"theoretical_loss": 3.3193511614202187,
"tokens_seen": 3016622080
},
{
"epoch": 0.08,
"learning_rate": 9.247219846022241e-05,
"loss": 2.3435,
"theoretical_loss": 3.3193511614202187,
"tokens_seen": 3016622080
},
{
"epoch": 0.09,
"learning_rate": 9.242942686056459e-05,
"loss": 2.5671,
"theoretical_loss": 3.319340085142796,
"tokens_seen": 3016753152
},
{
"epoch": 0.09,
"learning_rate": 9.238665526090677e-05,
"loss": 2.455,
"theoretical_loss": 3.319329009481346,
"tokens_seen": 3016884224
},
{
"epoch": 0.09,
"learning_rate": 9.234388366124893e-05,
"loss": 2.5706,
"theoretical_loss": 3.3193179344358086,
"tokens_seen": 3017015296
},
{
"epoch": 0.09,
"learning_rate": 9.230111206159111e-05,
"loss": 2.3164,
"theoretical_loss": 3.319306860006122,
"tokens_seen": 3017146368
},
{
"epoch": 0.09,
"learning_rate": 9.225834046193327e-05,
"loss": 2.317,
"theoretical_loss": 3.319295786192226,
"tokens_seen": 3017277440
},
{
"epoch": 0.09,
"learning_rate": 9.221556886227547e-05,
"loss": 2.3955,
"theoretical_loss": 3.319284712994059,
"tokens_seen": 3017408512
},
{
"epoch": 0.09,
"learning_rate": 9.217279726261763e-05,
"loss": 2.4648,
"theoretical_loss": 3.3192736404115606,
"tokens_seen": 3017539584
},
{
"epoch": 0.09,
"learning_rate": 9.21300256629598e-05,
"loss": 2.3474,
"theoretical_loss": 3.3192625684446693,
"tokens_seen": 3017670656
},
{
"epoch": 0.09,
"learning_rate": 9.208725406330197e-05,
"loss": 2.2225,
"theoretical_loss": 3.3192514970933242,
"tokens_seen": 3017801728
},
{
"epoch": 0.09,
"learning_rate": 9.204448246364414e-05,
"loss": 2.4497,
"theoretical_loss": 3.319240426357465,
"tokens_seen": 3017932800
},
{
"epoch": 0.09,
"learning_rate": 9.200171086398632e-05,
"loss": 2.4471,
"theoretical_loss": 3.31922935623703,
"tokens_seen": 3018063872
},
{
"epoch": 0.09,
"learning_rate": 9.19589392643285e-05,
"loss": 2.3816,
"theoretical_loss": 3.3192182867319584,
"tokens_seen": 3018194944
},
{
"epoch": 0.09,
"objective/train/docs_used": 1655335,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.1034493446350098,
"objective/train/theoretical_loss": 3.319212752210165,
"objective/train/tokens_used": 48311776,
"theoretical_loss": 3.319212752210165,
"tokens_seen": 3018260480
},
{
"epoch": 0.09,
"learning_rate": 9.191616766467066e-05,
"loss": 2.3561,
"theoretical_loss": 3.3192072178421896,
"tokens_seen": 3018326016
},
{
"epoch": 0.09,
"learning_rate": 9.187339606501284e-05,
"loss": 2.4573,
"theoretical_loss": 3.319196149567662,
"tokens_seen": 3018457088
},
{
"epoch": 0.09,
"learning_rate": 9.1830624465355e-05,
"loss": 2.3837,
"theoretical_loss": 3.3191850819083157,
"tokens_seen": 3018588160
},
{
"epoch": 0.09,
"learning_rate": 9.178785286569718e-05,
"loss": 2.4923,
"theoretical_loss": 3.319174014864089,
"tokens_seen": 3018719232
},
{
"epoch": 0.09,
"learning_rate": 9.174508126603936e-05,
"loss": 2.6121,
"theoretical_loss": 3.319162948434921,
"tokens_seen": 3018850304
},
{
"epoch": 0.09,
"learning_rate": 9.170230966638152e-05,
"loss": 2.5103,
"theoretical_loss": 3.319151882620752,
"tokens_seen": 3018981376
},
{
"epoch": 0.09,
"learning_rate": 9.16595380667237e-05,
"loss": 2.3644,
"theoretical_loss": 3.3191408174215193,
"tokens_seen": 3019112448
},
{
"epoch": 0.09,
"learning_rate": 9.161676646706587e-05,
"loss": 2.5582,
"theoretical_loss": 3.3191297528371635,
"tokens_seen": 3019243520
},
{
"epoch": 0.09,
"learning_rate": 9.157399486740804e-05,
"loss": 2.3977,
"theoretical_loss": 3.319118688867623,
"tokens_seen": 3019374592
},
{
"epoch": 0.09,
"learning_rate": 9.153122326775022e-05,
"loss": 2.3253,
"theoretical_loss": 3.319107625512837,
"tokens_seen": 3019505664
},
{
"epoch": 0.09,
"learning_rate": 9.148845166809239e-05,
"loss": 2.5582,
"theoretical_loss": 3.3190965627727445,
"tokens_seen": 3019636736
},
{
"epoch": 0.09,
"learning_rate": 9.144568006843457e-05,
"loss": 2.402,
"theoretical_loss": 3.3190855006472857,
"tokens_seen": 3019767808
},
{
"epoch": 0.09,
"objective/train/docs_used": 1656670,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.027528762817383,
"objective/train/theoretical_loss": 3.3190744391363984,
"objective/train/tokens_used": 49950176,
"theoretical_loss": 3.3190744391363984,
"tokens_seen": 3019898880
},
{
"epoch": 0.1,
"learning_rate": 9.140290846877674e-05,
"loss": 2.4387,
"theoretical_loss": 3.3190744391363984,
"tokens_seen": 3019898880
},
{
"epoch": 0.1,
"learning_rate": 9.136013686911891e-05,
"loss": 2.5059,
"theoretical_loss": 3.3190633782400223,
"tokens_seen": 3020029952
},
{
"epoch": 0.1,
"learning_rate": 9.131736526946109e-05,
"loss": 2.3856,
"theoretical_loss": 3.3190523179580973,
"tokens_seen": 3020161024
},
{
"epoch": 0.1,
"learning_rate": 9.127459366980325e-05,
"loss": 2.5568,
"theoretical_loss": 3.3190412582905617,
"tokens_seen": 3020292096
},
{
"epoch": 0.1,
"learning_rate": 9.123182207014542e-05,
"loss": 2.5093,
"theoretical_loss": 3.319030199237355,
"tokens_seen": 3020423168
},
{
"epoch": 0.1,
"learning_rate": 9.118905047048761e-05,
"loss": 2.3476,
"theoretical_loss": 3.3190191407984164,
"tokens_seen": 3020554240
},
{
"epoch": 0.1,
"learning_rate": 9.114627887082977e-05,
"loss": 2.4233,
"theoretical_loss": 3.3190080829736854,
"tokens_seen": 3020685312
},
{
"epoch": 0.1,
"learning_rate": 9.110350727117195e-05,
"loss": 2.4302,
"theoretical_loss": 3.318997025763101,
"tokens_seen": 3020816384
},
{
"epoch": 0.1,
"learning_rate": 9.106073567151412e-05,
"loss": 2.5779,
"theoretical_loss": 3.318985969166602,
"tokens_seen": 3020947456
},
{
"epoch": 0.1,
"learning_rate": 9.101796407185628e-05,
"loss": 2.4611,
"theoretical_loss": 3.3189749131841286,
"tokens_seen": 3021078528
},
{
"epoch": 0.1,
"learning_rate": 9.097519247219847e-05,
"loss": 2.3387,
"theoretical_loss": 3.3189638578156195,
"tokens_seen": 3021209600
},
{
"epoch": 0.1,
"learning_rate": 9.093242087254064e-05,
"loss": 2.6853,
"theoretical_loss": 3.3189528030610136,
"tokens_seen": 3021340672
},
{
"epoch": 0.1,
"learning_rate": 9.088964927288282e-05,
"loss": 2.4887,
"theoretical_loss": 3.318941748920251,
"tokens_seen": 3021471744
},
{
"epoch": 0.1,
"objective/train/docs_used": 1657192,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.7123944759368896,
"objective/train/theoretical_loss": 3.318936222080042,
"objective/train/tokens_used": 51588576,
"theoretical_loss": 3.318936222080042,
"tokens_seen": 3021537280
},
{
"epoch": 0.1,
"learning_rate": 9.084687767322498e-05,
"loss": 2.4619,
"theoretical_loss": 3.318930695393271,
"tokens_seen": 3021602816
},
{
"epoch": 0.1,
"learning_rate": 9.080410607356715e-05,
"loss": 2.4821,
"theoretical_loss": 3.3189196424800116,
"tokens_seen": 3021733888
},
{
"epoch": 0.1,
"learning_rate": 9.076133447390934e-05,
"loss": 2.4471,
"theoretical_loss": 3.3189085901804134,
"tokens_seen": 3021864960
},
{
"epoch": 0.1,
"learning_rate": 9.07185628742515e-05,
"loss": 2.3988,
"theoretical_loss": 3.3188975384944155,
"tokens_seen": 3021996032
},
{
"epoch": 0.1,
"learning_rate": 9.067579127459367e-05,
"loss": 2.469,
"theoretical_loss": 3.318886487421957,
"tokens_seen": 3022127104
},
{
"epoch": 0.1,
"learning_rate": 9.063301967493585e-05,
"loss": 2.5398,
"theoretical_loss": 3.318875436962977,
"tokens_seen": 3022258176
},
{
"epoch": 0.1,
"learning_rate": 9.059024807527801e-05,
"loss": 2.3125,
"theoretical_loss": 3.3188643871174155,
"tokens_seen": 3022389248
},
{
"epoch": 0.1,
"learning_rate": 9.05474764756202e-05,
"loss": 2.3502,
"theoretical_loss": 3.318853337885211,
"tokens_seen": 3022520320
},
{
"epoch": 0.1,
"learning_rate": 9.050470487596237e-05,
"loss": 2.4073,
"theoretical_loss": 3.318842289266304,
"tokens_seen": 3022651392
},
{
"epoch": 0.1,
"learning_rate": 9.046193327630453e-05,
"loss": 2.3899,
"theoretical_loss": 3.3188312412606327,
"tokens_seen": 3022782464
},
{
"epoch": 0.1,
"learning_rate": 9.041916167664671e-05,
"loss": 2.3516,
"theoretical_loss": 3.3188201938681368,
"tokens_seen": 3022913536
},
{
"epoch": 0.11,
"learning_rate": 9.037639007698889e-05,
"loss": 2.3968,
"theoretical_loss": 3.318809147088756,
"tokens_seen": 3023044608
},
{
"epoch": 0.11,
"objective/train/docs_used": 1658380,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.6505117416381836,
"objective/train/theoretical_loss": 3.3187981009224297,
"objective/train/tokens_used": 53226976,
"theoretical_loss": 3.3187981009224297,
"tokens_seen": 3023175680
},
{
"epoch": 0.11,
"learning_rate": 9.033361847733107e-05,
"loss": 2.4709,
"theoretical_loss": 3.3187981009224297,
"tokens_seen": 3023175680
},
{
"epoch": 0.11,
"learning_rate": 9.029084687767323e-05,
"loss": 2.3951,
"theoretical_loss": 3.3187870553690972,
"tokens_seen": 3023306752
},
{
"epoch": 0.11,
"learning_rate": 9.02480752780154e-05,
"loss": 2.5401,
"theoretical_loss": 3.3187760104286976,
"tokens_seen": 3023437824
},
{
"epoch": 0.11,
"learning_rate": 9.020530367835757e-05,
"loss": 2.4571,
"theoretical_loss": 3.3187649661011704,
"tokens_seen": 3023568896
},
{
"epoch": 0.11,
"learning_rate": 9.016253207869975e-05,
"loss": 2.362,
"theoretical_loss": 3.3187539223864557,
"tokens_seen": 3023699968
},
{
"epoch": 0.11,
"learning_rate": 9.011976047904193e-05,
"loss": 2.4072,
"theoretical_loss": 3.318742879284492,
"tokens_seen": 3023831040
},
{
"epoch": 0.11,
"learning_rate": 9.00769888793841e-05,
"loss": 2.3097,
"theoretical_loss": 3.3187318367952194,
"tokens_seen": 3023962112
},
{
"epoch": 0.11,
"learning_rate": 9.003421727972626e-05,
"loss": 2.3313,
"theoretical_loss": 3.318720794918577,
"tokens_seen": 3024093184
},
{
"epoch": 0.11,
"learning_rate": 8.999144568006844e-05,
"loss": 2.3635,
"theoretical_loss": 3.3187097536545047,
"tokens_seen": 3024224256
},
{
"epoch": 0.11,
"learning_rate": 8.994867408041062e-05,
"loss": 2.4897,
"theoretical_loss": 3.3186987130029415,
"tokens_seen": 3024355328
},
{
"epoch": 0.11,
"learning_rate": 8.990590248075278e-05,
"loss": 2.5518,
"theoretical_loss": 3.3186876729638266,
"tokens_seen": 3024486400
},
{
"epoch": 0.11,
"learning_rate": 8.986313088109496e-05,
"loss": 2.4191,
"theoretical_loss": 3.3186766335371005,
"tokens_seen": 3024617472
},
{
"epoch": 0.11,
"learning_rate": 8.982035928143712e-05,
"loss": 2.4865,
"theoretical_loss": 3.318665594722702,
"tokens_seen": 3024748544
},
{
"epoch": 0.11,
"objective/train/docs_used": 1658975,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.6697089672088623,
"objective/train/theoretical_loss": 3.3186600755451066,
"objective/train/tokens_used": 54865376,
"theoretical_loss": 3.3186600755451066,
"tokens_seen": 3024814080
},
{
"epoch": 0.11,
"learning_rate": 8.97775876817793e-05,
"loss": 2.4613,
"theoretical_loss": 3.3186545565205705,
"tokens_seen": 3024879616
},
{
"epoch": 0.11,
"learning_rate": 8.973481608212148e-05,
"loss": 2.4463,
"theoretical_loss": 3.318643518930646,
"tokens_seen": 3025010688
},
{
"epoch": 0.11,
"learning_rate": 8.969204448246365e-05,
"loss": 2.3875,
"theoretical_loss": 3.3186324819528674,
"tokens_seen": 3025141760
},
{
"epoch": 0.11,
"learning_rate": 8.964927288280582e-05,
"loss": 2.4101,
"theoretical_loss": 3.318621445587175,
"tokens_seen": 3025272832
},
{
"epoch": 0.11,
"learning_rate": 8.960650128314799e-05,
"loss": 2.3602,
"theoretical_loss": 3.3186104098335076,
"tokens_seen": 3025403904
},
{
"epoch": 0.11,
"learning_rate": 8.956372968349017e-05,
"loss": 2.41,
"theoretical_loss": 3.318599374691805,
"tokens_seen": 3025534976
},
{
"epoch": 0.11,
"learning_rate": 8.952095808383235e-05,
"loss": 2.4345,
"theoretical_loss": 3.318588340162007,
"tokens_seen": 3025666048
},
{
"epoch": 0.11,
"learning_rate": 8.947818648417451e-05,
"loss": 2.5437,
"theoretical_loss": 3.3185773062440527,
"tokens_seen": 3025797120
},
{
"epoch": 0.11,
"learning_rate": 8.943541488451669e-05,
"loss": 2.3191,
"theoretical_loss": 3.3185662729378826,
"tokens_seen": 3025928192
},
{
"epoch": 0.12,
"learning_rate": 8.939264328485885e-05,
"loss": 2.3322,
"theoretical_loss": 3.318555240243435,
"tokens_seen": 3026059264
},
{
"epoch": 0.12,
"learning_rate": 8.934987168520103e-05,
"loss": 2.3374,
"theoretical_loss": 3.3185442081606507,
"tokens_seen": 3026190336
},
{
"epoch": 0.12,
"learning_rate": 8.930710008554321e-05,
"loss": 2.4206,
"theoretical_loss": 3.3185331766894683,
"tokens_seen": 3026321408
},
{
"epoch": 0.12,
"objective/train/docs_used": 1660130,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.2490313053131104,
"objective/train/theoretical_loss": 3.318522145829828,
"objective/train/tokens_used": 56503776,
"theoretical_loss": 3.318522145829828,
"tokens_seen": 3026452480
},
{
"epoch": 0.12,
"learning_rate": 8.926432848588537e-05,
"loss": 2.2883,
"theoretical_loss": 3.318522145829828,
"tokens_seen": 3026452480
},
{
"epoch": 0.12,
"learning_rate": 8.922155688622755e-05,
"loss": 2.4436,
"theoretical_loss": 3.318511115581669,
"tokens_seen": 3026583552
},
{
"epoch": 0.12,
"learning_rate": 8.917878528656972e-05,
"loss": 2.4165,
"theoretical_loss": 3.3185000859449314,
"tokens_seen": 3026714624
},
{
"epoch": 0.12,
"learning_rate": 8.91360136869119e-05,
"loss": 2.3836,
"theoretical_loss": 3.3184890569195544,
"tokens_seen": 3026845696
},
{
"epoch": 0.12,
"learning_rate": 8.909324208725407e-05,
"loss": 2.3455,
"theoretical_loss": 3.3184780285054782,
"tokens_seen": 3026976768
},
{
"epoch": 0.12,
"learning_rate": 8.905047048759624e-05,
"loss": 2.4005,
"theoretical_loss": 3.318467000702642,
"tokens_seen": 3027107840
},
{
"epoch": 0.12,
"learning_rate": 8.900769888793842e-05,
"loss": 2.425,
"theoretical_loss": 3.3184559735109853,
"tokens_seen": 3027238912
},
{
"epoch": 0.12,
"learning_rate": 8.896492728828058e-05,
"loss": 2.4123,
"theoretical_loss": 3.3184449469304482,
"tokens_seen": 3027369984
},
{
"epoch": 0.12,
"learning_rate": 8.892215568862276e-05,
"loss": 2.4926,
"theoretical_loss": 3.3184339209609703,
"tokens_seen": 3027501056
},
{
"epoch": 0.12,
"learning_rate": 8.887938408896494e-05,
"loss": 2.2946,
"theoretical_loss": 3.318422895602491,
"tokens_seen": 3027632128
},
{
"epoch": 0.12,
"learning_rate": 8.88366124893071e-05,
"loss": 2.2669,
"theoretical_loss": 3.31841187085495,
"tokens_seen": 3027763200
},
{
"epoch": 0.12,
"learning_rate": 8.879384088964928e-05,
"loss": 2.2708,
"theoretical_loss": 3.318400846718288,
"tokens_seen": 3027894272
},
{
"epoch": 0.12,
"learning_rate": 8.875106928999145e-05,
"loss": 2.3823,
"theoretical_loss": 3.318389823192443,
"tokens_seen": 3028025344
},
{
"epoch": 0.12,
"objective/train/docs_used": 1661412,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.202181100845337,
"objective/train/theoretical_loss": 3.3183843116585585,
"objective/train/tokens_used": 58142176,
"theoretical_loss": 3.3183843116585585,
"tokens_seen": 3028090880
},
{
"epoch": 0.12,
"learning_rate": 8.870829769033362e-05,
"loss": 2.4362,
"theoretical_loss": 3.318378800277356,
"tokens_seen": 3028156416
},
{
"epoch": 0.12,
"learning_rate": 8.86655260906758e-05,
"loss": 2.3161,
"theoretical_loss": 3.3183677779729663,
"tokens_seen": 3028287488
},
{
"epoch": 0.12,
"learning_rate": 8.862275449101797e-05,
"loss": 2.4133,
"theoretical_loss": 3.3183567562792136,
"tokens_seen": 3028418560
},
{
"epoch": 0.12,
"learning_rate": 8.857998289136013e-05,
"loss": 2.464,
"theoretical_loss": 3.3183457351960377,
"tokens_seen": 3028549632
},
{
"epoch": 0.12,
"learning_rate": 8.853721129170231e-05,
"loss": 2.4854,
"theoretical_loss": 3.3183347147233784,
"tokens_seen": 3028680704
},
{
"epoch": 0.12,
"learning_rate": 8.849443969204449e-05,
"loss": 2.2685,
"theoretical_loss": 3.3183236948611756,
"tokens_seen": 3028811776
},
{
"epoch": 0.12,
"learning_rate": 8.845166809238667e-05,
"loss": 2.1281,
"theoretical_loss": 3.3183126756093686,
"tokens_seen": 3028942848
},
{
"epoch": 0.12,
"learning_rate": 8.840889649272883e-05,
"loss": 2.3911,
"theoretical_loss": 3.318301656967898,
"tokens_seen": 3029073920
},
{
"epoch": 0.13,
"learning_rate": 8.8366124893071e-05,
"loss": 2.2723,
"theoretical_loss": 3.3182906389367024,
"tokens_seen": 3029204992
},
{
"epoch": 0.13,
"learning_rate": 8.832335329341318e-05,
"loss": 2.2601,
"theoretical_loss": 3.3182796215157224,
"tokens_seen": 3029336064
},
{
"epoch": 0.13,
"learning_rate": 8.828058169375535e-05,
"loss": 2.3874,
"theoretical_loss": 3.318268604704898,
"tokens_seen": 3029467136
},
{
"epoch": 0.13,
"learning_rate": 8.823781009409753e-05,
"loss": 2.3443,
"theoretical_loss": 3.318257588504168,
"tokens_seen": 3029598208
},
{
"epoch": 0.13,
"objective/train/docs_used": 1662079,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.219090700149536,
"objective/train/theoretical_loss": 3.318246572913474,
"objective/train/tokens_used": 59780576,
"theoretical_loss": 3.318246572913474,
"tokens_seen": 3029729280
},
{
"epoch": 0.13,
"learning_rate": 8.81950384944397e-05,
"loss": 2.4424,
"theoretical_loss": 3.318246572913474,
"tokens_seen": 3029729280
},
{
"epoch": 0.13,
"learning_rate": 8.815226689478186e-05,
"loss": 2.4172,
"theoretical_loss": 3.318235557932754,
"tokens_seen": 3029860352
},
{
"epoch": 0.13,
"learning_rate": 8.810949529512404e-05,
"loss": 2.2985,
"theoretical_loss": 3.318224543561948,
"tokens_seen": 3029991424
},
{
"epoch": 0.13,
"learning_rate": 8.806672369546622e-05,
"loss": 2.5111,
"theoretical_loss": 3.3182135298009974,
"tokens_seen": 3030122496
},
{
"epoch": 0.13,
"learning_rate": 8.80239520958084e-05,
"loss": 2.481,
"theoretical_loss": 3.3182025166498406,
"tokens_seen": 3030253568
},
{
"epoch": 0.13,
"learning_rate": 8.798118049615056e-05,
"loss": 2.2891,
"theoretical_loss": 3.3181915041084182,
"tokens_seen": 3030384640
},
{
"epoch": 0.13,
"learning_rate": 8.793840889649273e-05,
"loss": 2.2625,
"theoretical_loss": 3.3181804921766695,
"tokens_seen": 3030515712
},
{
"epoch": 0.13,
"learning_rate": 8.78956372968349e-05,
"loss": 2.6118,
"theoretical_loss": 3.318169480854535,
"tokens_seen": 3030646784
},
{
"epoch": 0.13,
"learning_rate": 8.785286569717708e-05,
"loss": 2.3715,
"theoretical_loss": 3.318158470141954,
"tokens_seen": 3030777856
},
{
"epoch": 0.13,
"learning_rate": 8.781009409751925e-05,
"loss": 2.4212,
"theoretical_loss": 3.3181474600388667,
"tokens_seen": 3030908928
},
{
"epoch": 0.13,
"learning_rate": 8.776732249786143e-05,
"loss": 2.1904,
"theoretical_loss": 3.318136450545213,
"tokens_seen": 3031040000
},
{
"epoch": 0.13,
"learning_rate": 8.772455089820359e-05,
"loss": 2.3964,
"theoretical_loss": 3.318125441660933,
"tokens_seen": 3031171072
},
{
"epoch": 0.13,
"learning_rate": 8.768177929854577e-05,
"loss": 2.2051,
"theoretical_loss": 3.318114433385966,
"tokens_seen": 3031302144
},
{
"epoch": 0.13,
"objective/train/docs_used": 1662642,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.347285270690918,
"objective/train/theoretical_loss": 3.3181089294769563,
"objective/train/tokens_used": 61418976,
"theoretical_loss": 3.3181089294769563,
"tokens_seen": 3031367680
},
{
"epoch": 0.13,
"learning_rate": 8.763900769888795e-05,
"loss": 2.4334,
"theoretical_loss": 3.3181034257202526,
"tokens_seen": 3031433216
},
{
"epoch": 0.13,
"learning_rate": 8.759623609923011e-05,
"loss": 2.3486,
"theoretical_loss": 3.318092418663732,
"tokens_seen": 3031564288
},
{
"epoch": 0.13,
"learning_rate": 8.755346449957229e-05,
"loss": 2.4626,
"theoretical_loss": 3.3180814122163453,
"tokens_seen": 3031695360
},
{
"epoch": 0.13,
"learning_rate": 8.751069289991445e-05,
"loss": 2.4253,
"theoretical_loss": 3.3180704063780313,
"tokens_seen": 3031826432
},
{
"epoch": 0.13,
"learning_rate": 8.746792130025663e-05,
"loss": 2.4214,
"theoretical_loss": 3.318059401148731,
"tokens_seen": 3031957504
},
{
"epoch": 0.13,
"learning_rate": 8.742514970059881e-05,
"loss": 2.536,
"theoretical_loss": 3.3180483965283836,
"tokens_seen": 3032088576
},
{
"epoch": 0.14,
"learning_rate": 8.738237810094098e-05,
"loss": 2.3038,
"theoretical_loss": 3.318037392516929,
"tokens_seen": 3032219648
},
{
"epoch": 0.14,
"learning_rate": 8.733960650128315e-05,
"loss": 2.3862,
"theoretical_loss": 3.318026389114308,
"tokens_seen": 3032350720
},
{
"epoch": 0.14,
"learning_rate": 8.729683490162532e-05,
"loss": 2.2624,
"theoretical_loss": 3.3180153863204596,
"tokens_seen": 3032481792
},
{
"epoch": 0.14,
"learning_rate": 8.72540633019675e-05,
"loss": 2.4936,
"theoretical_loss": 3.3180043841353246,
"tokens_seen": 3032612864
},
{
"epoch": 0.14,
"learning_rate": 8.721129170230968e-05,
"loss": 2.384,
"theoretical_loss": 3.317993382558843,
"tokens_seen": 3032743936
},
{
"epoch": 0.14,
"learning_rate": 8.716852010265184e-05,
"loss": 2.4542,
"theoretical_loss": 3.317982381590954,
"tokens_seen": 3032875008
},
{
"epoch": 0.14,
"objective/train/docs_used": 1663221,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.7515668869018555,
"objective/train/theoretical_loss": 3.3179713812315983,
"objective/train/tokens_used": 63057376,
"theoretical_loss": 3.3179713812315983,
"tokens_seen": 3033006080
},
{
"epoch": 0.14,
"learning_rate": 8.712574850299402e-05,
"loss": 2.4188,
"theoretical_loss": 3.3179713812315983,
"tokens_seen": 3033006080
},
{
"epoch": 0.14,
"learning_rate": 8.708297690333618e-05,
"loss": 2.5102,
"theoretical_loss": 3.317960381480716,
"tokens_seen": 3033137152
},
{
"epoch": 0.14,
"learning_rate": 8.704020530367836e-05,
"loss": 2.39,
"theoretical_loss": 3.317949382338247,
"tokens_seen": 3033268224
},
{
"epoch": 0.14,
"learning_rate": 8.699743370402054e-05,
"loss": 2.3996,
"theoretical_loss": 3.3179383838041314,
"tokens_seen": 3033399296
},
{
"epoch": 0.14,
"learning_rate": 8.69546621043627e-05,
"loss": 2.6354,
"theoretical_loss": 3.317927385878309,
"tokens_seen": 3033530368
},
{
"epoch": 0.14,
"learning_rate": 8.691189050470488e-05,
"loss": 2.4611,
"theoretical_loss": 3.31791638856072,
"tokens_seen": 3033661440
},
{
"epoch": 0.14,
"learning_rate": 8.686911890504705e-05,
"loss": 2.4918,
"theoretical_loss": 3.317905391851305,
"tokens_seen": 3033792512
},
{
"epoch": 0.14,
"learning_rate": 8.682634730538923e-05,
"loss": 2.4662,
"theoretical_loss": 3.3178943957500033,
"tokens_seen": 3033923584
},
{
"epoch": 0.14,
"learning_rate": 8.67835757057314e-05,
"loss": 2.5224,
"theoretical_loss": 3.317883400256756,
"tokens_seen": 3034054656
},
{
"epoch": 0.14,
"learning_rate": 8.674080410607357e-05,
"loss": 2.5573,
"theoretical_loss": 3.3178724053715016,
"tokens_seen": 3034185728
},
{
"epoch": 0.14,
"learning_rate": 8.669803250641575e-05,
"loss": 2.4771,
"theoretical_loss": 3.3178614110941815,
"tokens_seen": 3034316800
},
{
"epoch": 0.14,
"learning_rate": 8.665526090675791e-05,
"loss": 2.6235,
"theoretical_loss": 3.3178504174247356,
"tokens_seen": 3034447872
},
{
"epoch": 0.14,
"learning_rate": 8.661248930710009e-05,
"loss": 2.5638,
"theoretical_loss": 3.317839424363104,
"tokens_seen": 3034578944
},
{
"epoch": 0.14,
"objective/train/docs_used": 1664363,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.340308666229248,
"objective/train/theoretical_loss": 3.3178339280602,
"objective/train/tokens_used": 64695776,
"theoretical_loss": 3.3178339280602,
"tokens_seen": 3034644480
},
{
"epoch": 0.14,
"learning_rate": 8.656971770744227e-05,
"loss": 2.4057,
"theoretical_loss": 3.317828431909227,
"tokens_seen": 3034710016
},
{
"epoch": 0.14,
"learning_rate": 8.652694610778443e-05,
"loss": 2.3668,
"theoretical_loss": 3.3178174400630445,
"tokens_seen": 3034841088
},
{
"epoch": 0.14,
"learning_rate": 8.64841745081266e-05,
"loss": 2.616,
"theoretical_loss": 3.3178064488244967,
"tokens_seen": 3034972160
},
{
"epoch": 0.14,
"learning_rate": 8.644140290846878e-05,
"loss": 2.402,
"theoretical_loss": 3.3177954581935234,
"tokens_seen": 3035103232
},
{
"epoch": 0.14,
"learning_rate": 8.639863130881095e-05,
"loss": 2.3662,
"theoretical_loss": 3.317784468170066,
"tokens_seen": 3035234304
},
{
"epoch": 0.15,
"learning_rate": 8.635585970915313e-05,
"loss": 2.3539,
"theoretical_loss": 3.317773478754063,
"tokens_seen": 3035365376
},
{
"epoch": 0.15,
"learning_rate": 8.63130881094953e-05,
"loss": 2.4253,
"theoretical_loss": 3.317762489945456,
"tokens_seen": 3035496448
},
{
"epoch": 0.15,
"learning_rate": 8.627031650983746e-05,
"loss": 2.3829,
"theoretical_loss": 3.3177515017441843,
"tokens_seen": 3035627520
},
{
"epoch": 0.15,
"learning_rate": 8.622754491017964e-05,
"loss": 2.5298,
"theoretical_loss": 3.3177405141501883,
"tokens_seen": 3035758592
},
{
"epoch": 0.15,
"learning_rate": 8.618477331052182e-05,
"loss": 2.4475,
"theoretical_loss": 3.317729527163409,
"tokens_seen": 3035889664
},
{
"epoch": 0.15,
"learning_rate": 8.6142001710864e-05,
"loss": 2.7431,
"theoretical_loss": 3.3177185407837855,
"tokens_seen": 3036020736
},
{
"epoch": 0.15,
"learning_rate": 8.609923011120616e-05,
"loss": 2.4028,
"theoretical_loss": 3.3177075550112587,
"tokens_seen": 3036151808
},
{
"epoch": 0.15,
"objective/train/docs_used": 1664891,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.417335033416748,
"objective/train/theoretical_loss": 3.3176965698457686,
"objective/train/tokens_used": 66334176,
"theoretical_loss": 3.3176965698457686,
"tokens_seen": 3036282880
},
{
"epoch": 0.15,
"learning_rate": 8.605645851154833e-05,
"loss": 2.4356,
"theoretical_loss": 3.3176965698457686,
"tokens_seen": 3036282880
},
{
"epoch": 0.15,
"learning_rate": 8.601368691189052e-05,
"loss": 2.4589,
"theoretical_loss": 3.3176855852872555,
"tokens_seen": 3036413952
},
{
"epoch": 0.15,
"learning_rate": 8.597091531223268e-05,
"loss": 2.4168,
"theoretical_loss": 3.3176746013356597,
"tokens_seen": 3036545024
},
{
"epoch": 0.15,
"learning_rate": 8.592814371257485e-05,
"loss": 2.3732,
"theoretical_loss": 3.317663617990922,
"tokens_seen": 3036676096
},
{
"epoch": 0.15,
"learning_rate": 8.588537211291703e-05,
"loss": 2.3843,
"theoretical_loss": 3.3176526352529816,
"tokens_seen": 3036807168
},
{
"epoch": 0.15,
"learning_rate": 8.584260051325919e-05,
"loss": 2.3937,
"theoretical_loss": 3.31764165312178,
"tokens_seen": 3036938240
},
{
"epoch": 0.15,
"learning_rate": 8.579982891360138e-05,
"loss": 2.4782,
"theoretical_loss": 3.3176306715972563,
"tokens_seen": 3037069312
},
{
"epoch": 0.15,
"learning_rate": 8.575705731394355e-05,
"loss": 2.5745,
"theoretical_loss": 3.3176196906793516,
"tokens_seen": 3037200384
},
{
"epoch": 0.15,
"learning_rate": 8.571428571428571e-05,
"loss": 2.4332,
"theoretical_loss": 3.3176087103680056,
"tokens_seen": 3037331456
},
{
"epoch": 0.15,
"learning_rate": 8.567151411462789e-05,
"loss": 2.4773,
"theoretical_loss": 3.3175977306631594,
"tokens_seen": 3037462528
},
{
"epoch": 0.15,
"learning_rate": 8.562874251497006e-05,
"loss": 2.4218,
"theoretical_loss": 3.3175867515647526,
"tokens_seen": 3037593600
},
{
"epoch": 0.15,
"learning_rate": 8.558597091531225e-05,
"loss": 2.3285,
"theoretical_loss": 3.317575773072726,
"tokens_seen": 3037724672
},
{
"epoch": 0.15,
"learning_rate": 8.554319931565441e-05,
"loss": 2.2487,
"theoretical_loss": 3.3175647951870197,
"tokens_seen": 3037855744
},
{
"epoch": 0.15,
"objective/train/docs_used": 1665791,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.037510395050049,
"objective/train/theoretical_loss": 3.317559306471518,
"objective/train/tokens_used": 67972576,
"theoretical_loss": 3.317559306471518,
"tokens_seen": 3037921280
},
{
"epoch": 0.15,
"learning_rate": 8.550042771599658e-05,
"loss": 2.4374,
"theoretical_loss": 3.3175538179075743,
"tokens_seen": 3037986816
},
{
"epoch": 0.15,
"learning_rate": 8.545765611633876e-05,
"loss": 2.3843,
"theoretical_loss": 3.31754284123433,
"tokens_seen": 3038117888
},
{
"epoch": 0.15,
"learning_rate": 8.541488451668092e-05,
"loss": 2.3408,
"theoretical_loss": 3.3175318651672274,
"tokens_seen": 3038248960
},
{
"epoch": 0.15,
"learning_rate": 8.537211291702311e-05,
"loss": 2.45,
"theoretical_loss": 3.3175208897062065,
"tokens_seen": 3038380032
},
{
"epoch": 0.16,
"learning_rate": 8.532934131736528e-05,
"loss": 2.5611,
"theoretical_loss": 3.317509914851208,
"tokens_seen": 3038511104
},
{
"epoch": 0.16,
"learning_rate": 8.528656971770744e-05,
"loss": 2.2891,
"theoretical_loss": 3.3174989406021718,
"tokens_seen": 3038642176
},
{
"epoch": 0.16,
"learning_rate": 8.524379811804962e-05,
"loss": 2.5899,
"theoretical_loss": 3.317487966959039,
"tokens_seen": 3038773248
},
{
"epoch": 0.16,
"learning_rate": 8.520102651839178e-05,
"loss": 2.4568,
"theoretical_loss": 3.3174769939217494,
"tokens_seen": 3038904320
},
{
"epoch": 0.16,
"learning_rate": 8.515825491873396e-05,
"loss": 2.4892,
"theoretical_loss": 3.317466021490244,
"tokens_seen": 3039035392
},
{
"epoch": 0.16,
"learning_rate": 8.511548331907614e-05,
"loss": 2.6673,
"theoretical_loss": 3.3174550496644626,
"tokens_seen": 3039166464
},
{
"epoch": 0.16,
"learning_rate": 8.50727117194183e-05,
"loss": 2.4839,
"theoretical_loss": 3.317444078444346,
"tokens_seen": 3039297536
},
{
"epoch": 0.16,
"learning_rate": 8.502994011976048e-05,
"loss": 2.4649,
"theoretical_loss": 3.317433107829835,
"tokens_seen": 3039428608
},
{
"epoch": 0.16,
"objective/train/docs_used": 1666368,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.4885432720184326,
"objective/train/theoretical_loss": 3.31742213782087,
"objective/train/tokens_used": 69610976,
"theoretical_loss": 3.31742213782087,
"tokens_seen": 3039559680
},
{
"epoch": 0.16,
"learning_rate": 8.498716852010266e-05,
"loss": 2.5349,
"theoretical_loss": 3.31742213782087,
"tokens_seen": 3039559680
},
{
"epoch": 0.16,
"learning_rate": 8.494439692044483e-05,
"loss": 2.403,
"theoretical_loss": 3.3174111684173906,
"tokens_seen": 3039690752
},
{
"epoch": 0.16,
"learning_rate": 8.4901625320787e-05,
"loss": 2.5023,
"theoretical_loss": 3.317400199619338,
"tokens_seen": 3039821824
},
{
"epoch": 0.16,
"learning_rate": 8.485885372112917e-05,
"loss": 2.4205,
"theoretical_loss": 3.3173892314266524,
"tokens_seen": 3039952896
},
{
"epoch": 0.16,
"learning_rate": 8.481608212147135e-05,
"loss": 2.5295,
"theoretical_loss": 3.3173782638392746,
"tokens_seen": 3040083968
},
{
"epoch": 0.16,
"learning_rate": 8.477331052181353e-05,
"loss": 2.4369,
"theoretical_loss": 3.3173672968571446,
"tokens_seen": 3040215040
},
{
"epoch": 0.16,
"learning_rate": 8.473053892215569e-05,
"loss": 2.5302,
"theoretical_loss": 3.3173563304802034,
"tokens_seen": 3040346112
},
{
"epoch": 0.16,
"learning_rate": 8.468776732249787e-05,
"loss": 2.486,
"theoretical_loss": 3.3173453647083915,
"tokens_seen": 3040477184
},
{
"epoch": 0.16,
"learning_rate": 8.464499572284003e-05,
"loss": 2.511,
"theoretical_loss": 3.317334399541649,
"tokens_seen": 3040608256
},
{
"epoch": 0.16,
"learning_rate": 8.460222412318221e-05,
"loss": 2.6424,
"theoretical_loss": 3.3173234349799166,
"tokens_seen": 3040739328
},
{
"epoch": 0.16,
"learning_rate": 8.455945252352439e-05,
"loss": 2.4228,
"theoretical_loss": 3.317312471023135,
"tokens_seen": 3040870400
},
{
"epoch": 0.16,
"learning_rate": 8.451668092386656e-05,
"loss": 2.5719,
"theoretical_loss": 3.3173015076712447,
"tokens_seen": 3041001472
},
{
"epoch": 0.16,
"learning_rate": 8.447390932420873e-05,
"loss": 2.5819,
"theoretical_loss": 3.3172905449241865,
"tokens_seen": 3041132544
},
{
"epoch": 0.16,
"objective/train/docs_used": 1667402,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.5028133392333984,
"objective/train/theoretical_loss": 3.317285063777451,
"objective/train/tokens_used": 71249376,
"theoretical_loss": 3.317285063777451,
"tokens_seen": 3041198080
},
{
"epoch": 0.16,
"learning_rate": 8.44311377245509e-05,
"loss": 2.5006,
"theoretical_loss": 3.3172795827819,
"tokens_seen": 3041263616
},
{
"epoch": 0.16,
"learning_rate": 8.438836612489306e-05,
"loss": 2.4687,
"theoretical_loss": 3.3172686212443274,
"tokens_seen": 3041394688
},
{
"epoch": 0.17,
"learning_rate": 8.434559452523526e-05,
"loss": 2.5483,
"theoretical_loss": 3.317257660311408,
"tokens_seen": 3041525760
},
{
"epoch": 0.17,
"learning_rate": 8.430282292557742e-05,
"loss": 2.6292,
"theoretical_loss": 3.3172466999830825,
"tokens_seen": 3041656832
},
{
"epoch": 0.17,
"learning_rate": 8.42600513259196e-05,
"loss": 2.4604,
"theoretical_loss": 3.317235740259292,
"tokens_seen": 3041787904
},
{
"epoch": 0.17,
"learning_rate": 8.421727972626176e-05,
"loss": 2.4137,
"theoretical_loss": 3.3172247811399767,
"tokens_seen": 3041918976
},
{
"epoch": 0.17,
"learning_rate": 8.417450812660394e-05,
"loss": 2.5,
"theoretical_loss": 3.317213822625077,
"tokens_seen": 3042050048
},
{
"epoch": 0.17,
"learning_rate": 8.413173652694612e-05,
"loss": 2.5009,
"theoretical_loss": 3.3172028647145346,
"tokens_seen": 3042181120
},
{
"epoch": 0.17,
"learning_rate": 8.408896492728828e-05,
"loss": 2.4612,
"theoretical_loss": 3.317191907408289,
"tokens_seen": 3042312192
},
{
"epoch": 0.17,
"learning_rate": 8.404619332763046e-05,
"loss": 2.4446,
"theoretical_loss": 3.3171809507062817,
"tokens_seen": 3042443264
},
{
"epoch": 0.17,
"learning_rate": 8.400342172797263e-05,
"loss": 2.5055,
"theoretical_loss": 3.3171699946084523,
"tokens_seen": 3042574336
},
{
"epoch": 0.17,
"learning_rate": 8.39606501283148e-05,
"loss": 2.5237,
"theoretical_loss": 3.3171590391147427,
"tokens_seen": 3042705408
},
{
"epoch": 0.17,
"objective/train/docs_used": 1668521,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.3456153869628906,
"objective/train/theoretical_loss": 3.3171480842250927,
"objective/train/tokens_used": 72887776,
"theoretical_loss": 3.3171480842250927,
"tokens_seen": 3042836480
},
{
"epoch": 0.17,
"learning_rate": 8.391787852865698e-05,
"loss": 2.4685,
"theoretical_loss": 3.3171480842250927,
"tokens_seen": 3042836480
},
{
"epoch": 0.17,
"learning_rate": 8.387510692899915e-05,
"loss": 2.4987,
"theoretical_loss": 3.317137129939443,
"tokens_seen": 3042967552
},
{
"epoch": 0.17,
"learning_rate": 8.383233532934131e-05,
"loss": 2.4009,
"theoretical_loss": 3.3171261762577346,
"tokens_seen": 3043098624
},
{
"epoch": 0.17,
"learning_rate": 8.378956372968349e-05,
"loss": 2.4797,
"theoretical_loss": 3.3171152231799086,
"tokens_seen": 3043229696
},
{
"epoch": 0.17,
"learning_rate": 8.374679213002567e-05,
"loss": 2.4712,
"theoretical_loss": 3.317104270705905,
"tokens_seen": 3043360768
},
{
"epoch": 0.17,
"learning_rate": 8.370402053036785e-05,
"loss": 2.4712,
"theoretical_loss": 3.3170933188356644,
"tokens_seen": 3043491840
},
{
"epoch": 0.17,
"learning_rate": 8.366124893071001e-05,
"loss": 2.5555,
"theoretical_loss": 3.3170823675691277,
"tokens_seen": 3043622912
},
{
"epoch": 0.17,
"learning_rate": 8.361847733105218e-05,
"loss": 2.5155,
"theoretical_loss": 3.317071416906236,
"tokens_seen": 3043753984
},
{
"epoch": 0.17,
"learning_rate": 8.357570573139436e-05,
"loss": 2.4821,
"theoretical_loss": 3.3170604668469297,
"tokens_seen": 3043885056
},
{
"epoch": 0.17,
"learning_rate": 8.353293413173653e-05,
"loss": 2.5343,
"theoretical_loss": 3.31704951739115,
"tokens_seen": 3044016128
},
{
"epoch": 0.17,
"learning_rate": 8.349016253207871e-05,
"loss": 2.4523,
"theoretical_loss": 3.317038568538837,
"tokens_seen": 3044147200
},
{
"epoch": 0.17,
"learning_rate": 8.344739093242088e-05,
"loss": 2.3806,
"theoretical_loss": 3.317027620289932,
"tokens_seen": 3044278272
},
{
"epoch": 0.17,
"learning_rate": 8.340461933276304e-05,
"loss": 2.5725,
"theoretical_loss": 3.317016672644375,
"tokens_seen": 3044409344
},
{
"epoch": 0.17,
"objective/train/docs_used": 1668980,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.030937671661377,
"objective/train/theoretical_loss": 3.3170111990478337,
"objective/train/tokens_used": 74526176,
"theoretical_loss": 3.3170111990478337,
"tokens_seen": 3044474880
},
{
"epoch": 0.17,
"learning_rate": 8.336184773310522e-05,
"loss": 2.3872,
"theoretical_loss": 3.3170057256021077,
"tokens_seen": 3044540416
},
{
"epoch": 0.18,
"learning_rate": 8.33190761334474e-05,
"loss": 2.497,
"theoretical_loss": 3.3169947791630703,
"tokens_seen": 3044671488
},
{
"epoch": 0.18,
"learning_rate": 8.327630453378958e-05,
"loss": 2.5771,
"theoretical_loss": 3.3169838333272037,
"tokens_seen": 3044802560
},
{
"epoch": 0.18,
"learning_rate": 8.323353293413174e-05,
"loss": 2.4761,
"theoretical_loss": 3.316972888094449,
"tokens_seen": 3044933632
},
{
"epoch": 0.18,
"learning_rate": 8.319076133447391e-05,
"loss": 2.4855,
"theoretical_loss": 3.3169619434647464,
"tokens_seen": 3045064704
},
{
"epoch": 0.18,
"learning_rate": 8.314798973481609e-05,
"loss": 2.3585,
"theoretical_loss": 3.3169509994380375,
"tokens_seen": 3045195776
},
{
"epoch": 0.18,
"learning_rate": 8.310521813515826e-05,
"loss": 2.5985,
"theoretical_loss": 3.3169400560142623,
"tokens_seen": 3045326848
},
{
"epoch": 0.18,
"learning_rate": 8.306244653550043e-05,
"loss": 2.4823,
"theoretical_loss": 3.3169291131933623,
"tokens_seen": 3045457920
},
{
"epoch": 0.18,
"learning_rate": 8.30196749358426e-05,
"loss": 2.4651,
"theoretical_loss": 3.316918170975278,
"tokens_seen": 3045588992
},
{
"epoch": 0.18,
"learning_rate": 8.297690333618477e-05,
"loss": 2.4029,
"theoretical_loss": 3.31690722935995,
"tokens_seen": 3045720064
},
{
"epoch": 0.18,
"learning_rate": 8.293413173652695e-05,
"loss": 2.5912,
"theoretical_loss": 3.3168962883473205,
"tokens_seen": 3045851136
},
{
"epoch": 0.18,
"learning_rate": 8.289136013686913e-05,
"loss": 2.5166,
"theoretical_loss": 3.316885347937329,
"tokens_seen": 3045982208
},
{
"epoch": 0.18,
"objective/train/docs_used": 1670028,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.2350833415985107,
"objective/train/theoretical_loss": 3.316874408129916,
"objective/train/tokens_used": 76164576,
"theoretical_loss": 3.316874408129916,
"tokens_seen": 3046113280
},
{
"epoch": 0.18,
"learning_rate": 8.284858853721129e-05,
"loss": 2.5141,
"theoretical_loss": 3.316874408129916,
"tokens_seen": 3046113280
},
{
"epoch": 0.18,
"learning_rate": 8.280581693755347e-05,
"loss": 2.441,
"theoretical_loss": 3.316863468925024,
"tokens_seen": 3046244352
},
{
"epoch": 0.18,
"learning_rate": 8.276304533789564e-05,
"loss": 2.5365,
"theoretical_loss": 3.3168525303225924,
"tokens_seen": 3046375424
},
{
"epoch": 0.18,
"learning_rate": 8.272027373823781e-05,
"loss": 2.6514,
"theoretical_loss": 3.316841592322563,
"tokens_seen": 3046506496
},
{
"epoch": 0.18,
"learning_rate": 8.267750213857999e-05,
"loss": 2.6148,
"theoretical_loss": 3.3168306549248765,
"tokens_seen": 3046637568
},
{
"epoch": 0.18,
"learning_rate": 8.263473053892216e-05,
"loss": 2.4865,
"theoretical_loss": 3.316819718129474,
"tokens_seen": 3046768640
},
{
"epoch": 0.18,
"learning_rate": 8.259195893926434e-05,
"loss": 2.5057,
"theoretical_loss": 3.3168087819362957,
"tokens_seen": 3046899712
},
{
"epoch": 0.18,
"learning_rate": 8.25491873396065e-05,
"loss": 2.4055,
"theoretical_loss": 3.316797846345283,
"tokens_seen": 3047030784
},
{
"epoch": 0.18,
"learning_rate": 8.250641573994868e-05,
"loss": 2.5902,
"theoretical_loss": 3.316786911356377,
"tokens_seen": 3047161856
},
{
"epoch": 0.18,
"learning_rate": 8.246364414029086e-05,
"loss": 2.3611,
"theoretical_loss": 3.316775976969519,
"tokens_seen": 3047292928
},
{
"epoch": 0.18,
"learning_rate": 8.242087254063302e-05,
"loss": 2.5959,
"theoretical_loss": 3.316765043184649,
"tokens_seen": 3047424000
},
{
"epoch": 0.18,
"learning_rate": 8.23781009409752e-05,
"loss": 2.5646,
"theoretical_loss": 3.316754110001708,
"tokens_seen": 3047555072
},
{
"epoch": 0.19,
"learning_rate": 8.233532934131736e-05,
"loss": 2.585,
"theoretical_loss": 3.3167431774206384,
"tokens_seen": 3047686144
},
{
"epoch": 0.19,
"objective/train/docs_used": 1670628,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 1.981849193572998,
"objective/train/theoretical_loss": 3.316737711355786,
"objective/train/tokens_used": 77802976,
"theoretical_loss": 3.316737711355786,
"tokens_seen": 3047751680
},
{
"epoch": 0.19,
"learning_rate": 8.229255774165954e-05,
"loss": 2.3982,
"theoretical_loss": 3.3167322454413792,
"tokens_seen": 3047817216
},
{
"epoch": 0.19,
"learning_rate": 8.224978614200172e-05,
"loss": 2.5217,
"theoretical_loss": 3.316721314063873,
"tokens_seen": 3047948288
},
{
"epoch": 0.19,
"learning_rate": 8.220701454234389e-05,
"loss": 2.6089,
"theoretical_loss": 3.3167103832880604,
"tokens_seen": 3048079360
},
{
"epoch": 0.19,
"learning_rate": 8.216424294268606e-05,
"loss": 2.529,
"theoretical_loss": 3.316699453113882,
"tokens_seen": 3048210432
},
{
"epoch": 0.19,
"learning_rate": 8.212147134302823e-05,
"loss": 2.4732,
"theoretical_loss": 3.3166885235412784,
"tokens_seen": 3048341504
},
{
"epoch": 0.19,
"learning_rate": 8.207869974337041e-05,
"loss": 2.4514,
"theoretical_loss": 3.316677594570192,
"tokens_seen": 3048472576
},
{
"epoch": 0.19,
"learning_rate": 8.203592814371259e-05,
"loss": 2.4329,
"theoretical_loss": 3.316666666200563,
"tokens_seen": 3048603648
},
{
"epoch": 0.19,
"learning_rate": 8.199315654405475e-05,
"loss": 2.5147,
"theoretical_loss": 3.316655738432332,
"tokens_seen": 3048734720
},
{
"epoch": 0.19,
"learning_rate": 8.195038494439693e-05,
"loss": 2.4813,
"theoretical_loss": 3.3166448112654408,
"tokens_seen": 3048865792
},
{
"epoch": 0.19,
"learning_rate": 8.190761334473909e-05,
"loss": 2.4503,
"theoretical_loss": 3.3166338846998302,
"tokens_seen": 3048996864
},
{
"epoch": 0.19,
"learning_rate": 8.186484174508127e-05,
"loss": 2.5814,
"theoretical_loss": 3.316622958735442,
"tokens_seen": 3049127936
},
{
"epoch": 0.19,
"learning_rate": 8.182207014542345e-05,
"loss": 2.5462,
"theoretical_loss": 3.3166120333722158,
"tokens_seen": 3049259008
},
{
"epoch": 0.19,
"objective/train/docs_used": 1671752,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.1045682430267334,
"objective/train/theoretical_loss": 3.3166011086100937,
"objective/train/tokens_used": 79441376,
"theoretical_loss": 3.3166011086100937,
"tokens_seen": 3049390080
},
{
"epoch": 0.19,
"learning_rate": 8.177929854576561e-05,
"loss": 2.5946,
"theoretical_loss": 3.3166011086100937,
"tokens_seen": 3049390080
},
{
"epoch": 0.19,
"learning_rate": 8.173652694610778e-05,
"loss": 2.4925,
"theoretical_loss": 3.3165901844490167,
"tokens_seen": 3049521152
},
{
"epoch": 0.19,
"learning_rate": 8.169375534644996e-05,
"loss": 2.4937,
"theoretical_loss": 3.3165792608889255,
"tokens_seen": 3049652224
},
{
"epoch": 0.19,
"learning_rate": 8.165098374679214e-05,
"loss": 2.5232,
"theoretical_loss": 3.3165683379297612,
"tokens_seen": 3049783296
},
{
"epoch": 0.19,
"learning_rate": 8.160821214713431e-05,
"loss": 2.4577,
"theoretical_loss": 3.3165574155714657,
"tokens_seen": 3049914368
},
{
"epoch": 0.19,
"learning_rate": 8.156544054747648e-05,
"loss": 2.5193,
"theoretical_loss": 3.3165464938139797,
"tokens_seen": 3050045440
},
{
"epoch": 0.19,
"learning_rate": 8.152266894781864e-05,
"loss": 2.4776,
"theoretical_loss": 3.3165355726572434,
"tokens_seen": 3050176512
},
{
"epoch": 0.19,
"learning_rate": 8.147989734816082e-05,
"loss": 2.4092,
"theoretical_loss": 3.3165246521011995,
"tokens_seen": 3050307584
},
{
"epoch": 0.19,
"learning_rate": 8.1437125748503e-05,
"loss": 2.3898,
"theoretical_loss": 3.3165137321457885,
"tokens_seen": 3050438656
},
{
"epoch": 0.19,
"learning_rate": 8.139435414884518e-05,
"loss": 2.4147,
"theoretical_loss": 3.3165028127909513,
"tokens_seen": 3050569728
},
{
"epoch": 0.19,
"learning_rate": 8.135158254918734e-05,
"loss": 2.559,
"theoretical_loss": 3.3164918940366293,
"tokens_seen": 3050700800
},
{
"epoch": 0.2,
"learning_rate": 8.130881094952951e-05,
"loss": 2.4779,
"theoretical_loss": 3.3164809758827634,
"tokens_seen": 3050831872
},
{
"epoch": 0.2,
"learning_rate": 8.126603934987169e-05,
"loss": 2.5393,
"theoretical_loss": 3.3164700583292954,
"tokens_seen": 3050962944
},
{
"epoch": 0.2,
"objective/train/docs_used": 1672176,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.2255687713623047,
"objective/train/theoretical_loss": 3.316464599777692,
"objective/train/tokens_used": 81079776,
"theoretical_loss": 3.316464599777692,
"tokens_seen": 3051028480
},
{
"epoch": 0.2,
"learning_rate": 8.122326775021386e-05,
"loss": 2.4892,
"theoretical_loss": 3.316459141376166,
"tokens_seen": 3051094016
},
{
"epoch": 0.2,
"learning_rate": 8.118049615055604e-05,
"loss": 2.4738,
"theoretical_loss": 3.3164482250233163,
"tokens_seen": 3051225088
},
{
"epoch": 0.2,
"learning_rate": 8.113772455089821e-05,
"loss": 2.5011,
"theoretical_loss": 3.316437309270688,
"tokens_seen": 3051356160
},
{
"epoch": 0.2,
"learning_rate": 8.109495295124037e-05,
"loss": 2.4879,
"theoretical_loss": 3.316426394118222,
"tokens_seen": 3051487232
},
{
"epoch": 0.2,
"learning_rate": 8.105218135158255e-05,
"loss": 2.441,
"theoretical_loss": 3.316415479565859,
"tokens_seen": 3051618304
},
{
"epoch": 0.2,
"learning_rate": 8.100940975192473e-05,
"loss": 2.5491,
"theoretical_loss": 3.3164045656135417,
"tokens_seen": 3051749376
},
{
"epoch": 0.2,
"learning_rate": 8.09666381522669e-05,
"loss": 2.502,
"theoretical_loss": 3.3163936522612096,
"tokens_seen": 3051880448
},
{
"epoch": 0.2,
"learning_rate": 8.092386655260907e-05,
"loss": 2.3763,
"theoretical_loss": 3.3163827395088052,
"tokens_seen": 3052011520
},
{
"epoch": 0.2,
"learning_rate": 8.088109495295124e-05,
"loss": 2.4419,
"theoretical_loss": 3.3163718273562695,
"tokens_seen": 3052142592
},
{
"epoch": 0.2,
"learning_rate": 8.083832335329341e-05,
"loss": 2.4011,
"theoretical_loss": 3.3163609158035436,
"tokens_seen": 3052273664
},
{
"epoch": 0.2,
"learning_rate": 8.07955517536356e-05,
"loss": 2.4094,
"theoretical_loss": 3.3163500048505687,
"tokens_seen": 3052404736
},
{
"epoch": 0.2,
"learning_rate": 8.075278015397776e-05,
"loss": 2.4447,
"theoretical_loss": 3.3163390944972857,
"tokens_seen": 3052535808
},
{
"epoch": 0.2,
"objective/train/docs_used": 1673386,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.077202320098877,
"objective/train/theoretical_loss": 3.316328184743637,
"objective/train/tokens_used": 82718176,
"theoretical_loss": 3.316328184743637,
"tokens_seen": 3052666880
},
{
"epoch": 0.2,
"learning_rate": 8.071000855431994e-05,
"loss": 2.3368,
"theoretical_loss": 3.316328184743637,
"tokens_seen": 3052666880
},
{
"epoch": 0.2,
"learning_rate": 8.06672369546621e-05,
"loss": 2.578,
"theoretical_loss": 3.3163172755895634,
"tokens_seen": 3052797952
},
{
"epoch": 0.2,
"learning_rate": 8.062446535500429e-05,
"loss": 2.5108,
"theoretical_loss": 3.3163063670350055,
"tokens_seen": 3052929024
},
{
"epoch": 0.2,
"learning_rate": 8.058169375534646e-05,
"loss": 2.4558,
"theoretical_loss": 3.3162954590799054,
"tokens_seen": 3053060096
},
{
"epoch": 0.2,
"learning_rate": 8.053892215568862e-05,
"loss": 2.5339,
"theoretical_loss": 3.316284551724204,
"tokens_seen": 3053191168
},
{
"epoch": 0.2,
"learning_rate": 8.04961505560308e-05,
"loss": 2.3677,
"theoretical_loss": 3.3162736449678434,
"tokens_seen": 3053322240
},
{
"epoch": 0.2,
"learning_rate": 8.045337895637297e-05,
"loss": 2.5638,
"theoretical_loss": 3.3162627388107637,
"tokens_seen": 3053453312
},
{
"epoch": 0.2,
"learning_rate": 8.041060735671514e-05,
"loss": 2.4784,
"theoretical_loss": 3.316251833252908,
"tokens_seen": 3053584384
},
{
"epoch": 0.2,
"learning_rate": 8.036783575705732e-05,
"loss": 2.486,
"theoretical_loss": 3.3162409282942154,
"tokens_seen": 3053715456
},
{
"epoch": 0.2,
"learning_rate": 8.032506415739949e-05,
"loss": 2.4993,
"theoretical_loss": 3.316230023934629,
"tokens_seen": 3053846528
},
{
"epoch": 0.21,
"learning_rate": 8.028229255774167e-05,
"loss": 2.5384,
"theoretical_loss": 3.3162191201740896,
"tokens_seen": 3053977600
},
{
"epoch": 0.21,
"learning_rate": 8.023952095808383e-05,
"loss": 2.5113,
"theoretical_loss": 3.3162082170125387,
"tokens_seen": 3054108672
},
{
"epoch": 0.21,
"learning_rate": 8.019674935842601e-05,
"loss": 2.2871,
"theoretical_loss": 3.3161973144499175,
"tokens_seen": 3054239744
},
{
"epoch": 0.21,
"objective/train/docs_used": 1673816,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.4686410427093506,
"objective/train/theoretical_loss": 3.316191863393187,
"objective/train/tokens_used": 84356576,
"theoretical_loss": 3.316191863393187,
"tokens_seen": 3054305280
},
{
"epoch": 0.21,
"learning_rate": 8.015397775876819e-05,
"loss": 2.4141,
"theoretical_loss": 3.3161864124861675,
"tokens_seen": 3054370816
},
{
"epoch": 0.21,
"learning_rate": 8.011120615911035e-05,
"loss": 2.5008,
"theoretical_loss": 3.31617551112123,
"tokens_seen": 3054501888
},
{
"epoch": 0.21,
"learning_rate": 8.006843455945253e-05,
"loss": 2.5764,
"theoretical_loss": 3.316164610355047,
"tokens_seen": 3054632960
},
{
"epoch": 0.21,
"learning_rate": 8.00256629597947e-05,
"loss": 2.4664,
"theoretical_loss": 3.316153710187559,
"tokens_seen": 3054764032
},
{
"epoch": 0.21,
"learning_rate": 7.998289136013687e-05,
"loss": 2.5636,
"theoretical_loss": 3.316142810618708,
"tokens_seen": 3054895104
},
{
"epoch": 0.21,
"learning_rate": 7.994011976047905e-05,
"loss": 2.54,
"theoretical_loss": 3.3161319116484353,
"tokens_seen": 3055026176
},
{
"epoch": 0.21,
"learning_rate": 7.989734816082122e-05,
"loss": 2.6878,
"theoretical_loss": 3.3161210132766823,
"tokens_seen": 3055157248
},
{
"epoch": 0.21,
"learning_rate": 7.98545765611634e-05,
"loss": 2.6299,
"theoretical_loss": 3.316110115503391,
"tokens_seen": 3055288320
},
{
"epoch": 0.21,
"learning_rate": 7.981180496150556e-05,
"loss": 2.476,
"theoretical_loss": 3.316099218328502,
"tokens_seen": 3055419392
},
{
"epoch": 0.21,
"learning_rate": 7.976903336184774e-05,
"loss": 2.5998,
"theoretical_loss": 3.3160883217519572,
"tokens_seen": 3055550464
},
{
"epoch": 0.21,
"learning_rate": 7.972626176218992e-05,
"loss": 2.4002,
"theoretical_loss": 3.316077425773698,
"tokens_seen": 3055681536
},
{
"epoch": 0.21,
"learning_rate": 7.968349016253208e-05,
"loss": 2.5102,
"theoretical_loss": 3.316066530393666,
"tokens_seen": 3055812608
},
{
"epoch": 0.21,
"objective/train/docs_used": 1674780,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.8518364429473877,
"objective/train/theoretical_loss": 3.3160556356118027,
"objective/train/tokens_used": 85994976,
"theoretical_loss": 3.3160556356118027,
"tokens_seen": 3055943680
},
{
"epoch": 0.21,
"learning_rate": 7.964071856287424e-05,
"loss": 2.5794,
"theoretical_loss": 3.3160556356118027,
"tokens_seen": 3055943680
},
{
"epoch": 0.21,
"learning_rate": 7.959794696321644e-05,
"loss": 2.6511,
"theoretical_loss": 3.3160447414280494,
"tokens_seen": 3056074752
},
{
"epoch": 0.21,
"learning_rate": 7.95551753635586e-05,
"loss": 2.4842,
"theoretical_loss": 3.316033847842348,
"tokens_seen": 3056205824
}
],
"max_steps": 2362,
"num_train_epochs": 9223372036854775807,
"total_flos": 3.3579161616384e+16,
"trial_name": null,
"trial_params": null
}