{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.31879762912785775,
  "global_step": 753,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {"epoch": 0.0, "learning_rate": 4.166666666666667e-06, "loss": 3.0643, "theoretical_loss": 3.321567680436603, "tokens_seen": 2990538752},
    {"epoch": 0.0, "learning_rate": 8.333333333333334e-06, "loss": 3.0798, "theoretical_loss": 3.3215564803546, "tokens_seen": 2990669824},
    {"epoch": 0.0, "learning_rate": 1.25e-05, "loss": 2.9318, "theoretical_loss": 3.321545280900887, "tokens_seen": 2990800896},
    {"epoch": 0.0, "learning_rate": 1.6666666666666667e-05, "loss": 2.8098, "theoretical_loss": 3.3215340820754022, "tokens_seen": 2990931968},
    {"epoch": 0.0, "learning_rate": 2.0833333333333336e-05, "loss": 2.7055, "theoretical_loss": 3.3215228838780817, "tokens_seen": 2991063040},
    {"epoch": 0.0, "learning_rate": 2.5e-05, "loss": 2.9762, "theoretical_loss": 3.3215116863088636, "tokens_seen": 2991194112},
    {"epoch": 0.0, "learning_rate": 2.916666666666667e-05, "loss": 2.8724, "theoretical_loss": 3.3215004893676854, "tokens_seen": 2991325184},
    {"epoch": 0.0, "learning_rate": 3.3333333333333335e-05, "loss": 3.0452, "theoretical_loss": 3.321489293054483, "tokens_seen": 2991456256},
    {"epoch": 0.0, "learning_rate": 3.7500000000000003e-05, "loss": 2.8676, "theoretical_loss": 3.321478097369195, "tokens_seen": 2991587328},
    {"epoch": 0.0, "learning_rate": 4.166666666666667e-05, "loss": 2.8343, "theoretical_loss": 3.321466902311758, "tokens_seen": 2991718400},
    {"epoch": 0.0, "learning_rate": 4.5833333333333334e-05, "loss": 2.743, "theoretical_loss": 3.3214557078821096, "tokens_seen": 2991849472},
    {"epoch": 0.01, "learning_rate": 5e-05, "loss": 2.5867, "theoretical_loss": 3.321444514080187, "tokens_seen": 2991980544},
    {"epoch": 0.01, "objective/train/docs_used": 1640856, "objective/train/instantaneous_batch_size": 16, "objective/train/instantaneous_microbatch_size": 16384, "objective/train/original_loss": 2.7297048568725586, "objective/train/theoretical_loss": 3.321438917414603, "objective/train/tokens_used": 22097376, "theoretical_loss": 3.321438917414603, "tokens_seen": 2992046080},
    {"epoch": 0.01, "learning_rate": 5.4166666666666664e-05, "loss": 2.7262, "theoretical_loss": 3.321433320905927, "tokens_seen": 2992111616},
    {"epoch": 0.01, "learning_rate": 5.833333333333334e-05, "loss": 2.6517, "theoretical_loss": 3.3214221283592678, "tokens_seen": 2992242688},
    {"epoch": 0.01, "learning_rate": 6.25e-05, "loss": 2.9399, "theoretical_loss": 3.321410936440146, "tokens_seen": 2992373760},
    {"epoch": 0.01, "learning_rate": 6.666666666666667e-05, "loss": 2.7939, "theoretical_loss": 3.3213997451485, "tokens_seen": 2992504832},
    {"epoch": 0.01, "learning_rate": 7.083333333333334e-05, "loss": 2.5715, "theoretical_loss": 3.3213885544842654, "tokens_seen": 2992635904},
    {"epoch": 0.01, "learning_rate": 7.500000000000001e-05, "loss": 2.6047, "theoretical_loss": 3.321377364447381, "tokens_seen": 2992766976},
    {"epoch": 0.01, "learning_rate": 7.916666666666666e-05, "loss": 2.6736, "theoretical_loss": 3.3213661750377836, "tokens_seen": 2992898048},
    {"epoch": 0.01, "learning_rate": 8.333333333333334e-05, "loss": 2.6853, "theoretical_loss": 3.3213549862554106, "tokens_seen": 2993029120},
    {"epoch": 0.01, "learning_rate": 8.75e-05, "loss": 2.3665, "theoretical_loss": 3.3213437981001994, "tokens_seen": 2993160192},
    {"epoch": 0.01, "learning_rate": 9.166666666666667e-05, "loss": 2.618, "theoretical_loss": 3.3213326105720875, "tokens_seen": 2993291264},
    {"epoch": 0.01, "learning_rate": 9.583333333333334e-05, "loss": 2.6614, "theoretical_loss": 3.3213214236710122, "tokens_seen": 2993422336},
    {"epoch": 0.01, "learning_rate": 0.0001, "loss": 2.6163, "theoretical_loss": 3.321310237396911, "tokens_seen": 2993553408},
    {"epoch": 0.01, "objective/train/docs_used": 1641461, "objective/train/instantaneous_batch_size": 16, "objective/train/instantaneous_microbatch_size": 16384, "objective/train/original_loss": 2.5642035007476807, "objective/train/theoretical_loss": 3.3212990517497207, "objective/train/tokens_used": 23735776, "theoretical_loss": 3.3212990517497207, "tokens_seen": 2993684480},
    {"epoch": 0.01, "learning_rate": 9.995722840034217e-05, "loss": 2.7218, "theoretical_loss": 3.3212990517497207, "tokens_seen": 2993684480},
    {"epoch": 0.01, "learning_rate": 9.991445680068435e-05, "loss": 2.5137, "theoretical_loss": 3.3212878667293797, "tokens_seen": 2993815552},
    {"epoch": 0.01, "learning_rate": 9.987168520102653e-05, "loss": 2.5283, "theoretical_loss": 3.321276682335825, "tokens_seen": 2993946624},
    {"epoch": 0.01, "learning_rate": 9.98289136013687e-05, "loss": 2.6367, "theoretical_loss": 3.3212654985689936, "tokens_seen": 2994077696},
    {"epoch": 0.01, "learning_rate": 9.978614200171087e-05, "loss": 2.5822, "theoretical_loss": 3.3212543154288237, "tokens_seen": 2994208768},
    {"epoch": 0.01, "learning_rate": 9.974337040205303e-05, "loss": 2.6296, "theoretical_loss": 3.3212431329152525, "tokens_seen": 2994339840},
    {"epoch": 0.01, "learning_rate": 9.970059880239521e-05, "loss": 2.5596, "theoretical_loss": 3.321231951028217, "tokens_seen": 2994470912},
    {"epoch": 0.01, "learning_rate": 9.965782720273739e-05, "loss": 2.5663, "theoretical_loss": 3.3212207697676552, "tokens_seen": 2994601984},
    {"epoch": 0.01, "learning_rate": 9.961505560307956e-05, "loss": 2.5138, "theoretical_loss": 3.3212095891335043, "tokens_seen": 2994733056},
    {"epoch": 0.01, "learning_rate": 9.957228400342173e-05, "loss": 2.5938, "theoretical_loss": 3.321198409125702, "tokens_seen": 2994864128},
    {"epoch": 0.01, "learning_rate": 9.95295124037639e-05, "loss": 2.4583, "theoretical_loss": 3.321187229744186, "tokens_seen": 2994995200},
    {"epoch": 0.02, "learning_rate": 9.948674080410608e-05, "loss": 2.5265, "theoretical_loss": 3.321176050988893, "tokens_seen": 2995126272},
    {"epoch": 0.02, "learning_rate": 9.944396920444825e-05, "loss": 2.7376, "theoretical_loss": 3.3211648728597614, "tokens_seen": 2995257344},
    {"epoch": 0.02, "objective/train/docs_used": 1642666, "objective/train/instantaneous_batch_size": 16, "objective/train/instantaneous_microbatch_size": 16384, "objective/train/original_loss": 2.7178521156311035, "objective/train/theoretical_loss": 3.3211592840299864, "objective/train/tokens_used": 25374176, "theoretical_loss": 3.3211592840299864, "tokens_seen": 2995322880},
    {"epoch": 0.02, "learning_rate": 9.940119760479042e-05, "loss": 2.4464, "theoretical_loss": 3.3211536953567284, "tokens_seen": 2995388416},
    {"epoch": 0.02, "learning_rate": 9.93584260051326e-05, "loss": 2.657, "theoretical_loss": 3.321142518479731, "tokens_seen": 2995519488},
    {"epoch": 0.02, "learning_rate": 9.931565440547476e-05, "loss": 2.5525, "theoretical_loss": 3.321131342228708, "tokens_seen": 2995650560},
    {"epoch": 0.02, "learning_rate": 9.927288280581694e-05, "loss": 2.6099, "theoretical_loss": 3.321120166603596, "tokens_seen": 2995781632},
    {"epoch": 0.02, "learning_rate": 9.923011120615912e-05, "loss": 2.4712, "theoretical_loss": 3.3211089916043326, "tokens_seen": 2995912704},
    {"epoch": 0.02, "learning_rate": 9.918733960650128e-05, "loss": 2.514, "theoretical_loss": 3.3210978172308554, "tokens_seen": 2996043776},
    {"epoch": 0.02, "learning_rate": 9.914456800684346e-05, "loss": 2.4037, "theoretical_loss": 3.3210866434831026, "tokens_seen": 2996174848},
    {"epoch": 0.02, "learning_rate": 9.910179640718563e-05, "loss": 2.4715, "theoretical_loss": 3.3210754703610106, "tokens_seen": 2996305920},
    {"epoch": 0.02, "learning_rate": 9.90590248075278e-05, "loss": 2.4805, "theoretical_loss": 3.321064297864518, "tokens_seen": 2996436992},
    {"epoch": 0.02, "learning_rate": 9.901625320786998e-05, "loss": 2.4755, "theoretical_loss": 3.3210531259935627, "tokens_seen": 2996568064},
    {"epoch": 0.02, "learning_rate": 9.897348160821215e-05, "loss": 2.5174, "theoretical_loss": 3.321041954748081, "tokens_seen": 2996699136},
    {"epoch": 0.02, "learning_rate": 9.893071000855433e-05, "loss": 2.5996, "theoretical_loss": 3.321030784128012, "tokens_seen": 2996830208},
    {"epoch": 0.02, "objective/train/docs_used": 1643300, "objective/train/instantaneous_batch_size": 16, "objective/train/instantaneous_microbatch_size": 16384, "objective/train/original_loss": 2.879695415496826, "objective/train/theoretical_loss": 3.321019614133292, "objective/train/tokens_used": 27012576, "theoretical_loss": 3.321019614133292, "tokens_seen": 2996961280},
    {"epoch": 0.02, "learning_rate": 9.888793840889649e-05, "loss": 2.5696, "theoretical_loss": 3.321019614133292, "tokens_seen": 2996961280},
    {"epoch": 0.02, "learning_rate": 9.884516680923867e-05, "loss": 2.607, "theoretical_loss": 3.3210084447638595, "tokens_seen": 2997092352},
    {"epoch": 0.02, "learning_rate": 9.880239520958085e-05, "loss": 2.4604, "theoretical_loss": 3.320997276019652, "tokens_seen": 2997223424},
    {"epoch": 0.02, "learning_rate": 9.875962360992301e-05, "loss": 2.4603, "theoretical_loss": 3.3209861079006067, "tokens_seen": 2997354496},
    {"epoch": 0.02, "learning_rate": 9.871685201026519e-05, "loss": 2.4374, "theoretical_loss": 3.320974940406662, "tokens_seen": 2997485568},
    {"epoch": 0.02, "learning_rate": 9.867408041060736e-05, "loss": 2.5285, "theoretical_loss": 3.320963773537755, "tokens_seen": 2997616640},
    {"epoch": 0.02, "learning_rate": 9.863130881094953e-05, "loss": 2.5895, "theoretical_loss": 3.320952607293824, "tokens_seen": 2997747712},
    {"epoch": 0.02, "learning_rate": 9.858853721129171e-05, "loss": 2.5835, "theoretical_loss": 3.320941441674806, "tokens_seen": 2997878784},
    {"epoch": 0.02, "learning_rate": 9.854576561163388e-05, "loss": 2.7362, "theoretical_loss": 3.320930276680639, "tokens_seen": 2998009856},
    {"epoch": 0.02, "learning_rate": 9.850299401197606e-05, "loss": 2.5553, "theoretical_loss": 3.3209191123112607, "tokens_seen": 2998140928},
    {"epoch": 0.03, "learning_rate": 9.846022241231822e-05, "loss": 2.6403, "theoretical_loss": 3.320907948566609, "tokens_seen": 2998272000},
    {"epoch": 0.03, "learning_rate": 9.84174508126604e-05, "loss": 2.5129, "theoretical_loss": 3.3208967854466214, "tokens_seen": 2998403072},
    {"epoch": 0.03, "learning_rate": 9.837467921300258e-05, "loss": 2.4238, "theoretical_loss": 3.3208856229512356, "tokens_seen": 2998534144},
    {"epoch": 0.03, "objective/train/docs_used": 1644380, "objective/train/instantaneous_batch_size": 16, "objective/train/instantaneous_microbatch_size": 16384, "objective/train/original_loss": 2.4041128158569336, "objective/train/theoretical_loss": 3.320880041937749, "objective/train/tokens_used": 28650976, "theoretical_loss": 3.320880041937749, "tokens_seen": 2998599680},
    {"epoch": 0.03, "learning_rate": 9.833190761334474e-05, "loss": 2.3705, "theoretical_loss": 3.3208744610803898, "tokens_seen": 2998665216},
    {"epoch": 0.03, "learning_rate": 9.828913601368692e-05, "loss": 2.4602, "theoretical_loss": 3.320863299834021, "tokens_seen": 2998796288},
    {"epoch": 0.03, "learning_rate": 9.824636441402908e-05, "loss": 2.5492, "theoretical_loss": 3.320852139212068, "tokens_seen": 2998927360},
    {"epoch": 0.03, "learning_rate": 9.820359281437126e-05, "loss": 2.426, "theoretical_loss": 3.3208409792144677, "tokens_seen": 2999058432},
    {"epoch": 0.03, "learning_rate": 9.816082121471344e-05, "loss": 2.4079, "theoretical_loss": 3.320829819841158, "tokens_seen": 2999189504},
    {"epoch": 0.03, "learning_rate": 9.81180496150556e-05, "loss": 2.5189, "theoretical_loss": 3.320818661092077, "tokens_seen": 2999320576},
    {"epoch": 0.03, "learning_rate": 9.807527801539777e-05, "loss": 2.4085, "theoretical_loss": 3.3208075029671624, "tokens_seen": 2999451648},
    {"epoch": 0.03, "learning_rate": 9.803250641573995e-05, "loss": 2.4031, "theoretical_loss": 3.320796345466352, "tokens_seen": 2999582720},
    {"epoch": 0.03, "learning_rate": 9.798973481608213e-05, "loss": 2.5067, "theoretical_loss": 3.320785188589584, "tokens_seen": 2999713792},
    {"epoch": 0.03, "learning_rate": 9.79469632164243e-05, "loss": 2.4357, "theoretical_loss": 3.3207740323367956, "tokens_seen": 2999844864},
    {"epoch": 0.03, "learning_rate": 9.790419161676647e-05, "loss": 2.4929, "theoretical_loss": 3.3207628767079242, "tokens_seen": 2999975936},
    {"epoch": 0.03, "learning_rate": 9.786142001710863e-05, "loss": 2.4052, "theoretical_loss": 3.3207517217029094, "tokens_seen": 3000107008},
    {"epoch": 0.03, "objective/train/docs_used": 1645056, "objective/train/instantaneous_batch_size": 16, "objective/train/instantaneous_microbatch_size": 16384, "objective/train/original_loss": 2.3271520137786865, "objective/train/theoretical_loss": 3.3207405673216877, "objective/train/tokens_used": 30289376, "theoretical_loss": 3.3207405673216877, "tokens_seen": 3000238080},
    {"epoch": 0.03, "learning_rate": 9.781864841745081e-05, "loss": 2.5201, "theoretical_loss": 3.3207405673216877, "tokens_seen": 3000238080},
    {"epoch": 0.03, "learning_rate": 9.777587681779299e-05, "loss": 2.5431, "theoretical_loss": 3.320729413564197, "tokens_seen": 3000369152},
    {"epoch": 0.03, "learning_rate": 9.773310521813517e-05, "loss": 2.5359, "theoretical_loss": 3.3207182604303753, "tokens_seen": 3000500224},
    {"epoch": 0.03, "learning_rate": 9.769033361847733e-05, "loss": 2.1929, "theoretical_loss": 3.320707107920161, "tokens_seen": 3000631296},
    {"epoch": 0.03, "learning_rate": 9.76475620188195e-05, "loss": 2.5196, "theoretical_loss": 3.3206959560334917, "tokens_seen": 3000762368},
    {"epoch": 0.03, "learning_rate": 9.760479041916169e-05, "loss": 2.3645, "theoretical_loss": 3.320684804770305, "tokens_seen": 3000893440},
    {"epoch": 0.03, "learning_rate": 9.756201881950386e-05, "loss": 2.5329, "theoretical_loss": 3.3206736541305393, "tokens_seen": 3001024512},
    {"epoch": 0.03, "learning_rate": 9.751924721984602e-05, "loss": 2.533, "theoretical_loss": 3.3206625041141318, "tokens_seen": 3001155584},
    {"epoch": 0.04, "learning_rate": 9.74764756201882e-05, "loss": 2.4702, "theoretical_loss": 3.3206513547210212, "tokens_seen": 3001286656},
    {"epoch": 0.04, "learning_rate": 9.743370402053036e-05, "loss": 2.6255, "theoretical_loss": 3.320640205951145, "tokens_seen": 3001417728},
    {"epoch": 0.04, "learning_rate": 9.739093242087256e-05, "loss": 2.5139, "theoretical_loss": 3.3206290578044415, "tokens_seen": 3001548800},
    {"epoch": 0.04, "learning_rate": 9.734816082121472e-05, "loss": 2.3796, "theoretical_loss": 3.3206179102808484, "tokens_seen": 3001679872},
    {"epoch": 0.04, "learning_rate": 9.730538922155689e-05, "loss": 2.4997, "theoretical_loss": 3.3206067633803036, "tokens_seen": 3001810944},
    {"epoch": 0.04, "objective/train/docs_used": 1646327, "objective/train/instantaneous_batch_size": 16, "objective/train/instantaneous_microbatch_size": 16384, "objective/train/original_loss": 2.398160219192505, "objective/train/theoretical_loss": 3.320601190163655, "objective/train/tokens_used": 31927776, "theoretical_loss": 3.320601190163655, "tokens_seen": 3001876480},
    {"epoch": 0.04, "learning_rate": 9.726261762189906e-05, "loss": 2.3607, "theoretical_loss": 3.320595617102745, "tokens_seen": 3001942016},
    {"epoch": 0.04, "learning_rate": 9.721984602224123e-05, "loss": 2.4803, "theoretical_loss": 3.320584471448111, "tokens_seen": 3002073088},
    {"epoch": 0.04, "learning_rate": 9.717707442258342e-05, "loss": 2.348, "theoretical_loss": 3.3205733264163393, "tokens_seen": 3002204160},
    {"epoch": 0.04, "learning_rate": 9.713430282292558e-05, "loss": 2.2828, "theoretical_loss": 3.320562182007368, "tokens_seen": 3002335232},
    {"epoch": 0.04, "learning_rate": 9.709153122326775e-05, "loss": 2.4615, "theoretical_loss": 3.320551038221135, "tokens_seen": 3002466304},
    {"epoch": 0.04, "learning_rate": 9.704875962360993e-05, "loss": 2.3988, "theoretical_loss": 3.3205398950575784, "tokens_seen": 3002597376},
    {"epoch": 0.04, "learning_rate": 9.700598802395209e-05, "loss": 2.5582, "theoretical_loss": 3.320528752516636, "tokens_seen": 3002728448},
    {"epoch": 0.04, "learning_rate": 9.696321642429428e-05, "loss": 2.3266, "theoretical_loss": 3.3205176105982463, "tokens_seen": 3002859520},
    {"epoch": 0.04, "learning_rate": 9.692044482463645e-05, "loss": 2.5922, "theoretical_loss": 3.320506469302347, "tokens_seen": 3002990592},
    {"epoch": 0.04, "learning_rate": 9.687767322497861e-05, "loss": 2.4959, "theoretical_loss": 3.3204953286288763, "tokens_seen": 3003121664},
    {"epoch": 0.04, "learning_rate": 9.683490162532079e-05, "loss": 2.6068, "theoretical_loss": 3.3204841885777725, "tokens_seen": 3003252736},
    {"epoch": 0.04, "learning_rate": 9.679213002566297e-05, "loss": 2.3996, "theoretical_loss": 3.3204730491489727, "tokens_seen": 3003383808},
    {"epoch": 0.04, "objective/train/docs_used": 1647543, "objective/train/instantaneous_batch_size": 16, "objective/train/instantaneous_microbatch_size": 16384, "objective/train/original_loss": 2.3885486125946045, "objective/train/theoretical_loss": 3.3204619103424164, "objective/train/tokens_used": 33566176, "theoretical_loss": 3.3204619103424164, "tokens_seen": 3003514880},
    {"epoch": 0.04, "learning_rate": 9.674935842600514e-05, "loss": 2.4692, "theoretical_loss": 3.3204619103424164, "tokens_seen": 3003514880},
    {"epoch": 0.04, "learning_rate": 9.670658682634731e-05, "loss": 2.35, "theoretical_loss": 3.3204507721580403, "tokens_seen": 3003645952},
    {"epoch": 0.04, "learning_rate": 9.666381522668948e-05, "loss": 2.3168, "theoretical_loss": 3.3204396345957834, "tokens_seen": 3003777024},
    {"epoch": 0.04, "learning_rate": 9.662104362703166e-05, "loss": 2.4671, "theoretical_loss": 3.320428497655584, "tokens_seen": 3003908096},
    {"epoch": 0.04, "learning_rate": 9.657827202737383e-05, "loss": 2.552, "theoretical_loss": 3.320417361337379, "tokens_seen": 3004039168},
    {"epoch": 0.04, "learning_rate": 9.6535500427716e-05, "loss": 2.3902, "theoretical_loss": 3.3204062256411078, "tokens_seen": 3004170240},
    {"epoch": 0.04, "learning_rate": 9.649272882805818e-05, "loss": 2.491, "theoretical_loss": 3.320395090566708, "tokens_seen": 3004301312},
    {"epoch": 0.05, "learning_rate": 9.644995722840034e-05, "loss": 2.4628, "theoretical_loss": 3.3203839561141173, "tokens_seen": 3004432384},
    {"epoch": 0.05, "learning_rate": 9.640718562874252e-05, "loss": 2.4858, "theoretical_loss": 3.320372822283275, "tokens_seen": 3004563456},
    {"epoch": 0.05, "learning_rate": 9.63644140290847e-05, "loss": 2.5038, "theoretical_loss": 3.3203616890741183, "tokens_seen": 3004694528},
    {"epoch": 0.05, "learning_rate": 9.632164242942686e-05, "loss": 2.2651, "theoretical_loss": 3.3203505564865856, "tokens_seen": 3004825600},
    {"epoch": 0.05, "learning_rate": 9.627887082976904e-05, "loss": 2.4591, "theoretical_loss": 3.3203394245206153, "tokens_seen": 3004956672},
    {"epoch": 0.05, "learning_rate": 9.623609923011121e-05, "loss": 2.4003, "theoretical_loss": 3.320328293176145, "tokens_seen": 3005087744},
    {"epoch": 0.05, "objective/train/docs_used": 1648109, "objective/train/instantaneous_batch_size": 16, "objective/train/instantaneous_microbatch_size": 16384, "objective/train/original_loss": 2.3964030742645264, "objective/train/theoretical_loss": 3.3203227277369534, "objective/train/tokens_used": 35204576, "theoretical_loss": 3.3203227277369534, "tokens_seen": 3005153280},
    {"epoch": 0.05, "learning_rate": 9.619332763045337e-05, "loss": 2.3952, "theoretical_loss": 3.320317162453114, "tokens_seen": 3005218816},
    {"epoch": 0.05, "learning_rate": 9.615055603079556e-05, "loss": 2.4367, "theoretical_loss": 3.3203060323514593, "tokens_seen": 3005349888},
    {"epoch": 0.05, "learning_rate": 9.610778443113773e-05, "loss": 2.4468, "theoretical_loss": 3.3202949028711197, "tokens_seen": 3005480960},
    {"epoch": 0.05, "learning_rate": 9.60650128314799e-05, "loss": 2.3551, "theoretical_loss": 3.3202837740120335, "tokens_seen": 3005612032},
    {"epoch": 0.05, "learning_rate": 9.602224123182207e-05, "loss": 2.3886, "theoretical_loss": 3.3202726457741387, "tokens_seen": 3005743104},
    {"epoch": 0.05, "learning_rate": 9.597946963216424e-05, "loss": 2.4953, "theoretical_loss": 3.320261518157374, "tokens_seen": 3005874176},
    {"epoch": 0.05, "learning_rate": 9.593669803250643e-05, "loss": 2.3074, "theoretical_loss": 3.3202503911616765, "tokens_seen": 3006005248},
    {"epoch": 0.05, "learning_rate": 9.589392643284859e-05, "loss": 2.4135, "theoretical_loss": 3.320239264786986, "tokens_seen": 3006136320},
    {"epoch": 0.05, "learning_rate": 9.585115483319077e-05, "loss": 2.431, "theoretical_loss": 3.3202281390332393, "tokens_seen": 3006267392},
    {"epoch": 0.05, "learning_rate": 9.580838323353294e-05, "loss": 2.3277, "theoretical_loss": 3.320217013900376, "tokens_seen": 3006398464},
    {"epoch": 0.05, "learning_rate": 9.576561163387511e-05, "loss": 2.5083, "theoretical_loss": 3.3202058893883333, "tokens_seen": 3006529536},
    {"epoch": 0.05, "learning_rate": 9.572284003421729e-05, "loss": 2.4582, "theoretical_loss": 3.3201947654970505, "tokens_seen": 3006660608},
    {"epoch": 0.05, "objective/train/docs_used": 1649212, "objective/train/instantaneous_batch_size": 16, "objective/train/instantaneous_microbatch_size": 16384, "objective/train/original_loss": 2.5212416648864746, "objective/train/theoretical_loss": 3.320183642226465, "objective/train/tokens_used": 36842976, "theoretical_loss": 3.320183642226465, "tokens_seen": 3006791680},
    {"epoch": 0.05, "learning_rate": 9.568006843455946e-05, "loss": 2.3596, "theoretical_loss": 3.320183642226465, "tokens_seen": 3006791680},
    {"epoch": 0.05, "learning_rate": 9.563729683490164e-05, "loss": 2.3923, "theoretical_loss": 3.3201725195765155, "tokens_seen": 3006922752},
    {"epoch": 0.05, "learning_rate": 9.55945252352438e-05, "loss": 2.3563, "theoretical_loss": 3.3201613975471402, "tokens_seen": 3007053824},
    {"epoch": 0.05, "learning_rate": 9.555175363558598e-05, "loss": 2.4285, "theoretical_loss": 3.3201502761382775, "tokens_seen": 3007184896},
    {"epoch": 0.05, "learning_rate": 9.550898203592816e-05, "loss": 2.2928, "theoretical_loss": 3.320139155349866, "tokens_seen": 3007315968},
    {"epoch": 0.06, "learning_rate": 9.546621043627032e-05, "loss": 2.4617, "theoretical_loss": 3.3201280351818436, "tokens_seen": 3007447040},
    {"epoch": 0.06, "learning_rate": 9.542343883661249e-05, "loss": 2.4107, "theoretical_loss": 3.320116915634149, "tokens_seen": 3007578112},
    {"epoch": 0.06, "learning_rate": 9.538066723695466e-05, "loss": 2.5715, "theoretical_loss": 3.3201057967067205, "tokens_seen": 3007709184},
    {"epoch": 0.06, "learning_rate": 9.533789563729684e-05, "loss": 2.4217, "theoretical_loss": 3.3200946783994962, "tokens_seen": 3007840256},
    {"epoch": 0.06, "learning_rate": 9.529512403763902e-05, "loss": 2.4315, "theoretical_loss": 3.3200835607124146, "tokens_seen": 3007971328},
    {"epoch": 0.06, "learning_rate": 9.525235243798119e-05, "loss": 2.4307, "theoretical_loss": 3.3200724436454143, "tokens_seen": 3008102400},
    {"epoch": 0.06, "learning_rate": 9.520958083832335e-05, "loss": 2.5032, "theoretical_loss": 3.3200613271984336, "tokens_seen": 3008233472},
    {"epoch": 0.06, "learning_rate": 9.516680923866553e-05, "loss": 2.4379, "theoretical_loss": 3.3200502113714108, "tokens_seen": 3008364544},
    {"epoch": 0.06, "objective/train/docs_used": 1649940, "objective/train/instantaneous_batch_size": 16, "objective/train/instantaneous_microbatch_size": 16384, "objective/train/original_loss": 2.399945020675659, "objective/train/theoretical_loss": 3.3200446536903643, "objective/train/tokens_used": 38481376, "theoretical_loss": 3.3200446536903643, "tokens_seen": 3008430080},
    {"epoch": 0.06, "learning_rate": 9.512403763900771e-05, "loss": 2.5107, "theoretical_loss": 3.3200390961642845, "tokens_seen": 3008495616},
    {"epoch": 0.06, "learning_rate": 9.508126603934989e-05, "loss": 2.4659, "theoretical_loss": 3.3200279815769926, "tokens_seen": 3008626688},
    {"epoch": 0.06, "learning_rate": 9.503849443969205e-05, "loss": 2.4327, "theoretical_loss": 3.3200168676094743, "tokens_seen": 3008757760},
    {"epoch": 0.06, "learning_rate": 9.499572284003421e-05, "loss": 2.2681, "theoretical_loss": 3.320005754261668, "tokens_seen": 3008888832},
    {"epoch": 0.06, "learning_rate": 9.49529512403764e-05, "loss": 2.3802, "theoretical_loss": 3.319994641533511, "tokens_seen": 3009019904},
    {"epoch": 0.06, "learning_rate": 9.491017964071857e-05, "loss": 2.5461, "theoretical_loss": 3.319983529424943, "tokens_seen": 3009150976},
    {"epoch": 0.06, "learning_rate": 9.486740804106075e-05, "loss": 2.4021, "theoretical_loss": 3.3199724179359027, "tokens_seen": 3009282048},
    {"epoch": 0.06, "learning_rate": 9.482463644140291e-05, "loss": 2.4038, "theoretical_loss": 3.319961307066327, "tokens_seen": 3009413120},
    {"epoch": 0.06, "learning_rate": 9.478186484174508e-05, "loss": 2.4687, "theoretical_loss": 3.3199501968161558, "tokens_seen": 3009544192},
    {"epoch": 0.06, "learning_rate": 9.473909324208726e-05, "loss": 2.4401, "theoretical_loss": 3.319939087185327, "tokens_seen": 3009675264},
    {"epoch": 0.06, "learning_rate": 9.469632164242944e-05, "loss": 2.4778, "theoretical_loss": 3.3199279781737796, "tokens_seen": 3009806336},
    {"epoch": 0.06, "learning_rate": 9.46535500427716e-05, "loss": 2.4166, "theoretical_loss": 3.3199168697814514, "tokens_seen": 3009937408},
    {"epoch": 0.06, "objective/train/docs_used": 1651249, "objective/train/instantaneous_batch_size": 16, "objective/train/instantaneous_microbatch_size": 16384, "objective/train/original_loss": 2.412529230117798, "objective/train/theoretical_loss": 3.3199057620082812, "objective/train/tokens_used": 40119776, "theoretical_loss": 3.3199057620082812, "tokens_seen": 3010068480},
    {"epoch": 0.06, "learning_rate": 9.461077844311378e-05, "loss": 2.3839, "theoretical_loss": 3.3199057620082812, "tokens_seen": 3010068480},
    {"epoch": 0.06, "learning_rate": 9.456800684345594e-05, "loss": 2.5879, "theoretical_loss": 3.319894654854208, "tokens_seen": 3010199552},
    {"epoch": 0.06, "learning_rate": 9.452523524379812e-05, "loss": 2.444, "theoretical_loss": 3.3198835483191695, "tokens_seen": 3010330624},
    {"epoch": 0.06, "learning_rate": 9.44824636441403e-05, "loss": 2.2849, "theoretical_loss": 3.319872442403105, "tokens_seen": 3010461696},
    {"epoch": 0.07, "learning_rate": 9.443969204448247e-05, "loss": 2.568, "theoretical_loss": 3.3198613371059524, "tokens_seen": 3010592768},
    {"epoch": 0.07, "learning_rate": 9.439692044482464e-05, "loss": 2.4131, "theoretical_loss": 3.319850232427651, "tokens_seen": 3010723840},
    {"epoch": 0.07, "learning_rate": 9.435414884516681e-05, "loss": 2.4222, "theoretical_loss": 3.3198391283681383, "tokens_seen": 3010854912},
    {"epoch": 0.07, "learning_rate": 9.431137724550899e-05, "loss": 2.3993, "theoretical_loss": 3.3198280249273546, "tokens_seen": 3010985984},
    {"epoch": 0.07, "learning_rate": 9.426860564585116e-05, "loss": 2.5294, "theoretical_loss": 3.319816922105237, "tokens_seen": 3011117056},
    {"epoch": 0.07, "learning_rate": 9.422583404619333e-05, "loss": 2.4529, "theoretical_loss": 3.319805819901724, "tokens_seen": 3011248128},
    {"epoch": 0.07, "learning_rate": 9.418306244653551e-05, "loss": 2.4924, "theoretical_loss": 3.3197947183167553, "tokens_seen": 3011379200},
    {"epoch": 0.07, "learning_rate": 9.414029084687767e-05, "loss": 2.6054, "theoretical_loss": 3.319783617350269, "tokens_seen": 3011510272},
    {"epoch": 0.07, "learning_rate": 9.409751924721985e-05, "loss": 2.388, "theoretical_loss": 3.319772517002204, "tokens_seen": 3011641344},
    {"epoch": 0.07, "objective/train/docs_used": 1651905, "objective/train/instantaneous_batch_size": 16, "objective/train/instantaneous_microbatch_size": 16384, "objective/train/original_loss": 2.8164331912994385, "objective/train/theoretical_loss": 3.31976696706006, "objective/train/tokens_used": 41758176, "theoretical_loss": 3.31976696706006, "tokens_seen": 3011706880},
    {"epoch": 0.07, "learning_rate": 9.405474764756203e-05, "loss": 2.4623, "theoretical_loss": 3.319761417272498, "tokens_seen": 3011772416},
    {"epoch": 0.07, "learning_rate": 9.40119760479042e-05, "loss": 2.3955, "theoretical_loss": 3.319750318161091, "tokens_seen": 3011903488},
    {"epoch": 0.07, "learning_rate": 9.396920444824637e-05, "loss": 2.2424, "theoretical_loss": 3.3197392196679205, "tokens_seen": 3012034560},
    {"epoch": 0.07, "learning_rate": 9.392643284858854e-05, "loss": 2.3568, "theoretical_loss": 3.3197281217929255, "tokens_seen": 3012165632},
    {"epoch": 0.07, "learning_rate": 9.388366124893072e-05, "loss": 2.3788, "theoretical_loss": 3.319717024536045, "tokens_seen": 3012296704},
    {"epoch": 0.07, "learning_rate": 9.38408896492729e-05, "loss": 2.3081, "theoretical_loss": 3.3197059278972176, "tokens_seen": 3012427776},
    {"epoch": 0.07, "learning_rate": 9.379811804961506e-05, "loss": 2.4277, "theoretical_loss": 3.3196948318763817, "tokens_seen": 3012558848},
    {"epoch": 0.07, "learning_rate": 9.375534644995724e-05, "loss": 2.4135, "theoretical_loss": 3.319683736473476, "tokens_seen": 3012689920},
    {"epoch": 0.07, "learning_rate": 9.37125748502994e-05, "loss": 2.3474, "theoretical_loss": 3.3196726416884395, "tokens_seen": 3012820992},
    {"epoch": 0.07, "learning_rate": 9.366980325064158e-05, "loss": 2.5489, "theoretical_loss": 3.3196615475212106, "tokens_seen": 3012952064},
    {"epoch": 0.07, "learning_rate": 9.362703165098376e-05, "loss": 2.4998, "theoretical_loss": 3.3196504539717284, "tokens_seen": 3013083136},
    {"epoch": 0.07, "learning_rate": 9.358426005132592e-05, "loss": 2.5438, "theoretical_loss": 3.3196393610399317, "tokens_seen": 3013214208},
    {"epoch": 0.07, "objective/train/docs_used": 1652881, "objective/train/instantaneous_batch_size": 16, "objective/train/instantaneous_microbatch_size": 16384, "objective/train/original_loss": 2.3386740684509277, "objective/train/theoretical_loss": 3.3196282687257583, "objective/train/tokens_used": 43396576, "theoretical_loss": 3.3196282687257583, "tokens_seen": 3013345280},
    {"epoch": 0.07, "learning_rate": 9.35414884516681e-05, "loss": 2.5351, "theoretical_loss": 3.3196282687257583, "tokens_seen": 3013345280},
    {"epoch": 0.07, "learning_rate": 9.349871685201027e-05, "loss": 2.3127, "theoretical_loss": 3.3196171770291483, "tokens_seen": 3013476352},
    {"epoch": 0.07, "learning_rate": 9.345594525235244e-05, "loss": 2.435, "theoretical_loss": 3.3196060859500394, "tokens_seen": 3013607424},
    {"epoch": 0.08, "learning_rate": 9.341317365269462e-05, "loss": 2.3466, "theoretical_loss": 3.319594995488371, "tokens_seen": 3013738496},
    {"epoch": 0.08, "learning_rate": 9.337040205303679e-05, "loss": 2.5683, "theoretical_loss": 3.3195839056440812, "tokens_seen": 3013869568},
    {"epoch": 0.08, "learning_rate": 9.332763045337895e-05, "loss": 2.4838, "theoretical_loss": 3.3195728164171094, "tokens_seen": 3014000640},
    {"epoch": 0.08, "learning_rate": 9.328485885372113e-05, "loss": 2.5372, "theoretical_loss": 3.319561727807394, "tokens_seen": 3014131712},
    {"epoch": 0.08, "learning_rate": 9.324208725406331e-05, "loss": 2.4055, "theoretical_loss": 3.3195506398148744, "tokens_seen": 3014262784},
    {"epoch": 0.08, "learning_rate": 9.319931565440549e-05, "loss": 2.5566, "theoretical_loss": 3.319539552439489, "tokens_seen": 3014393856},
    {"epoch": 0.08, "learning_rate": 9.315654405474765e-05, "loss": 2.4051, "theoretical_loss": 3.3195284656811763, "tokens_seen": 3014524928},
    {"epoch": 0.08, "learning_rate": 9.311377245508982e-05, "loss": 2.5032, "theoretical_loss": 3.319517379539876, "tokens_seen": 3014656000},
    {"epoch": 0.08, "learning_rate": 9.3071000855432e-05, "loss": 2.427, "theoretical_loss": 3.3195062940155258, "tokens_seen": 3014787072},
    {"epoch": 0.08, "learning_rate": 9.302822925577417e-05, "loss": 2.4468, "theoretical_loss": 3.3194952091080654, "tokens_seen": 3014918144},
    {"epoch": 0.08, "objective/train/docs_used": 1653310, "objective/train/instantaneous_batch_size": 16, "objective/train/instantaneous_microbatch_size": 16384, "objective/train/original_loss": 2.2789435386657715, "objective/train/theoretical_loss": 3.3194896668856497, "objective/train/tokens_used": 45034976, "theoretical_loss": 3.3194896668856497, "tokens_seen": 3014983680},
    {"epoch": 0.08, "learning_rate": 9.298545765611635e-05, "loss": 2.3447, "theoretical_loss": 3.3194841248174334, "tokens_seen": 3015049216},
    {"epoch": 0.08, "learning_rate": 9.294268605645852e-05, "loss": 2.3587, "theoretical_loss": 3.3194730411435684, "tokens_seen": 3015180288},
    {"epoch": 0.08, "learning_rate": 9.289991445680068e-05, "loss": 2.6264, "theoretical_loss": 3.3194619580864098, "tokens_seen": 3015311360},
    {"epoch": 0.08, "learning_rate": 9.285714285714286e-05, "loss": 2.57, "theoretical_loss": 3.3194508756458965, "tokens_seen": 3015442432},
    {"epoch": 0.08, "learning_rate": 9.281437125748504e-05, "loss": 2.3972, "theoretical_loss": 3.319439793821967, "tokens_seen": 3015573504},
    {"epoch": 0.08, "learning_rate": 9.27715996578272e-05, "loss": 2.4522, "theoretical_loss": 3.3194287126145596, "tokens_seen": 3015704576},
    {"epoch": 0.08, "learning_rate": 9.272882805816938e-05, "loss": 2.4546, "theoretical_loss": 3.3194176320236144, "tokens_seen": 3015835648},
    {"epoch": 0.08, "learning_rate": 9.268605645851154e-05, "loss": 2.6088, "theoretical_loss": 3.31940655204907, "tokens_seen": 3015966720},
    {"epoch": 0.08, "learning_rate": 9.264328485885372e-05, "loss": 2.4454, "theoretical_loss": 3.319395472690865, "tokens_seen": 3016097792},
    {"epoch": 0.08, "learning_rate": 9.26005132591959e-05, "loss": 2.3876, "theoretical_loss": 3.3193843939489382, "tokens_seen": 3016228864},
    {"epoch": 0.08, "learning_rate": 9.255774165953807e-05, "loss": 2.4971, "theoretical_loss": 3.319373315823229, "tokens_seen": 3016359936},
    {"epoch": 0.08, "learning_rate": 9.251497005988024e-05, "loss": 2.5668, "theoretical_loss": 3.3193622383136763, "tokens_seen": 3016491008},
    {"epoch": 0.08, "objective/train/docs_used": 1654644, "objective/train/instantaneous_batch_size": 16, "objective/train/instantaneous_microbatch_size": 16384, "objective/train/original_loss": 2.148563861846924, "objective/train/theoretical_loss": 3.3193511614202187, "objective/train/tokens_used": 46673376, "theoretical_loss": 3.3193511614202187, "tokens_seen": 3016622080},
    {"epoch": 0.08, "learning_rate": 9.247219846022241e-05, "loss": 2.3435, "theoretical_loss": 3.3193511614202187, "tokens_seen": 3016622080},
    {"epoch": 0.09, "learning_rate": 9.242942686056459e-05, "loss": 2.5671, "theoretical_loss": 3.319340085142796, "tokens_seen": 3016753152},
    {"epoch": 0.09, "learning_rate": 9.238665526090677e-05, "loss": 2.455, "theoretical_loss": 3.319329009481346, "tokens_seen": 3016884224},
    {"epoch": 0.09, "learning_rate": 9.234388366124893e-05, "loss": 2.5706, "theoretical_loss": 3.3193179344358086, "tokens_seen": 3017015296},
    {"epoch": 0.09, "learning_rate": 9.230111206159111e-05, "loss": 2.3164, "theoretical_loss": 3.319306860006122, "tokens_seen": 3017146368},
    {"epoch": 0.09, "learning_rate": 9.225834046193327e-05, "loss": 2.317, "theoretical_loss": 3.319295786192226, "tokens_seen": 3017277440},
    {"epoch": 0.09, "learning_rate": 9.221556886227547e-05, "loss": 2.3955, "theoretical_loss": 3.319284712994059, "tokens_seen": 3017408512},
    {"epoch": 0.09, "learning_rate": 9.217279726261763e-05, "loss": 2.4648, "theoretical_loss": 3.3192736404115606, "tokens_seen": 3017539584},
    {"epoch": 0.09, "learning_rate": 9.21300256629598e-05, "loss": 2.3474, "theoretical_loss": 3.3192625684446693, "tokens_seen": 3017670656},
    {"epoch": 0.09, "learning_rate": 9.208725406330197e-05, "loss": 2.2225, "theoretical_loss": 3.3192514970933242, "tokens_seen": 3017801728},
    {"epoch": 0.09, "learning_rate": 9.204448246364414e-05, "loss": 2.4497, "theoretical_loss": 3.319240426357465, "tokens_seen": 3017932800},
    {"epoch": 0.09, "learning_rate": 9.200171086398632e-05, "loss": 2.4471, "theoretical_loss": 3.31922935623703, "tokens_seen": 3018063872},
    {"epoch": 0.09, "learning_rate": 9.19589392643285e-05, "loss": 2.3816, "theoretical_loss": 3.3192182867319584, "tokens_seen": 3018194944},
    {"epoch": 0.09, "objective/train/docs_used": 1655335, "objective/train/instantaneous_batch_size": 16, "objective/train/instantaneous_microbatch_size": 16384, "objective/train/original_loss": 2.1034493446350098, "objective/train/theoretical_loss": 3.319212752210165, "objective/train/tokens_used": 48311776, "theoretical_loss": 3.319212752210165, "tokens_seen": 3018260480},
    {"epoch": 0.09, "learning_rate": 9.191616766467066e-05, "loss": 2.3561, "theoretical_loss": 3.3192072178421896, "tokens_seen": 3018326016},
    {"epoch": 0.09, "learning_rate": 9.187339606501284e-05, "loss": 2.4573, "theoretical_loss": 3.319196149567662, "tokens_seen": 3018457088},
    {"epoch": 0.09, "learning_rate": 9.1830624465355e-05, "loss": 2.3837, "theoretical_loss": 3.3191850819083157, "tokens_seen": 3018588160},
    {"epoch": 0.09, "learning_rate": 9.178785286569718e-05, "loss": 2.4923, "theoretical_loss": 3.319174014864089, "tokens_seen": 3018719232},
    {"epoch": 0.09, "learning_rate": 9.174508126603936e-05, "loss": 2.6121, "theoretical_loss": 3.319162948434921, "tokens_seen": 3018850304},
    {"epoch": 0.09, "learning_rate": 9.170230966638152e-05, "loss": 2.5103, "theoretical_loss": 3.319151882620752, "tokens_seen": 3018981376},
    {"epoch": 0.09, "learning_rate": 9.16595380667237e-05, "loss": 2.3644, "theoretical_loss": 3.3191408174215193, "tokens_seen": 3019112448},
    {"epoch": 0.09, "learning_rate": 9.161676646706587e-05, "loss": 2.5582, "theoretical_loss": 3.3191297528371635, "tokens_seen": 3019243520},
    {"epoch": 0.09, "learning_rate": 9.157399486740804e-05, "loss": 2.3977, "theoretical_loss": 3.319118688867623, "tokens_seen": 3019374592},
    {"epoch": 0.09, "learning_rate": 9.153122326775022e-05, "loss": 2.3253, "theoretical_loss": 3.319107625512837, "tokens_seen": 3019505664},
    {"epoch": 0.09, "learning_rate": 9.148845166809239e-05, "loss": 2.5582, "theoretical_loss": 3.3190965627727445, "tokens_seen": 3019636736},
    {"epoch": 0.09, "learning_rate": 9.144568006843457e-05, "loss": 2.402, "theoretical_loss": 3.3190855006472857, "tokens_seen": 3019767808},
    {"epoch": 0.09, "objective/train/docs_used": 1656670, "objective/train/instantaneous_batch_size": 16, "objective/train/instantaneous_microbatch_size": 16384, "objective/train/original_loss": 2.027528762817383, "objective/train/theoretical_loss": 3.3190744391363984, "objective/train/tokens_used": 49950176, "theoretical_loss": 3.3190744391363984, "tokens_seen": 3019898880},
    {"epoch": 0.1, "learning_rate": 9.140290846877674e-05, "loss": 2.4387, "theoretical_loss": 3.3190744391363984, "tokens_seen": 3019898880},
    {"epoch": 0.1, "learning_rate": 9.136013686911891e-05, "loss": 2.5059, "theoretical_loss": 3.3190633782400223, "tokens_seen": 3020029952},
    {"epoch": 0.1, "learning_rate": 9.131736526946109e-05, "loss": 2.3856, "theoretical_loss": 3.3190523179580973, "tokens_seen": 3020161024},
    {"epoch": 0.1, "learning_rate": 9.127459366980325e-05, "loss": 2.5568, "theoretical_loss": 3.3190412582905617, "tokens_seen": 3020292096},
    {"epoch": 0.1, "learning_rate": 9.123182207014542e-05, "loss": 2.5093, "theoretical_loss": 3.319030199237355, "tokens_seen": 3020423168},
    {"epoch": 0.1, "learning_rate": 9.118905047048761e-05, "loss": 2.3476, "theoretical_loss": 3.3190191407984164, "tokens_seen": 3020554240},
    {"epoch": 0.1, "learning_rate": 9.114627887082977e-05, "loss": 2.4233, "theoretical_loss": 3.3190080829736854, "tokens_seen": 3020685312},
    {"epoch": 0.1, "learning_rate": 9.110350727117195e-05, "loss": 2.4302, "theoretical_loss": 3.318997025763101, "tokens_seen": 3020816384},
    {"epoch": 0.1, "learning_rate": 9.106073567151412e-05, "loss": 2.5779, "theoretical_loss": 3.318985969166602, "tokens_seen": 3020947456},
    {"epoch": 0.1, "learning_rate": 9.101796407185628e-05, "loss": 2.4611, "theoretical_loss": 3.3189749131841286, "tokens_seen": 3021078528},
    {"epoch": 0.1, "learning_rate": 9.097519247219847e-05, "loss": 2.3387, "theoretical_loss": 3.3189638578156195, "tokens_seen": 3021209600},
    {"epoch": 0.1, "learning_rate": 9.093242087254064e-05, "loss": 2.6853, "theoretical_loss": 3.3189528030610136, "tokens_seen": 3021340672},
    {"epoch": 0.1, "learning_rate": 9.088964927288282e-05, "loss": 2.4887, "theoretical_loss": 3.318941748920251, "tokens_seen": 3021471744},
    {"epoch": 0.1, "objective/train/docs_used": 1657192, "objective/train/instantaneous_batch_size": 16, "objective/train/instantaneous_microbatch_size": 16384, "objective/train/original_loss": 2.7123944759368896, "objective/train/theoretical_loss": 3.318936222080042, "objective/train/tokens_used": 51588576, "theoretical_loss": 3.318936222080042, "tokens_seen": 3021537280},
    {"epoch": 0.1, "learning_rate": 9.084687767322498e-05, "loss": 2.4619, "theoretical_loss": 3.318930695393271, "tokens_seen": 3021602816},
    {"epoch": 0.1, "learning_rate": 9.080410607356715e-05, "loss": 2.4821, "theoretical_loss": 3.3189196424800116, "tokens_seen": 3021733888},
    {"epoch": 0.1, "learning_rate": 9.076133447390934e-05, "loss": 2.4471, "theoretical_loss": 3.3189085901804134, "tokens_seen": 3021864960},
    {"epoch": 0.1, "learning_rate": 9.07185628742515e-05, "loss": 2.3988, "theoretical_loss": 3.3188975384944155, "tokens_seen": 3021996032},
    {"epoch": 0.1, "learning_rate": 9.067579127459367e-05, "loss": 2.469, "theoretical_loss": 3.318886487421957, "tokens_seen": 3022127104},
    {"epoch": 0.1, "learning_rate": 9.063301967493585e-05, "loss": 2.5398, "theoretical_loss": 3.318875436962977, "tokens_seen": 3022258176},
    {"epoch": 0.1, "learning_rate": 9.059024807527801e-05, "loss": 2.3125, "theoretical_loss": 3.3188643871174155, "tokens_seen": 3022389248},
    {"epoch": 0.1, "learning_rate": 9.05474764756202e-05, "loss": 2.3502, "theoretical_loss": 3.318853337885211, "tokens_seen": 3022520320},
    {"epoch": 0.1, "learning_rate": 9.050470487596237e-05, "loss": 2.4073, "theoretical_loss": 3.318842289266304, "tokens_seen": 3022651392},
    {"epoch": 0.1, "learning_rate": 9.046193327630453e-05, "loss": 2.3899, "theoretical_loss": 3.3188312412606327, "tokens_seen": 3022782464},
    {"epoch": 0.1, "learning_rate": 9.041916167664671e-05, "loss": 2.3516, "theoretical_loss": 3.3188201938681368, "tokens_seen": 3022913536},
    {"epoch": 0.11, "learning_rate": 9.037639007698889e-05, "loss": 2.3968, "theoretical_loss": 3.318809147088756, "tokens_seen": 3023044608},
    {"epoch": 0.11, "objective/train/docs_used": 1658380, "objective/train/instantaneous_batch_size": 16, "objective/train/instantaneous_microbatch_size": 16384, "objective/train/original_loss": 2.6505117416381836, "objective/train/theoretical_loss": 3.3187981009224297, "objective/train/tokens_used": 53226976, "theoretical_loss": 3.3187981009224297, "tokens_seen": 3023175680},
    {"epoch": 0.11, "learning_rate": 9.033361847733107e-05, "loss": 2.4709, "theoretical_loss": 3.3187981009224297, "tokens_seen": 3023175680},
    {"epoch": 0.11, "learning_rate": 9.029084687767323e-05, "loss": 2.3951, "theoretical_loss": 3.3187870553690972, "tokens_seen": 3023306752},
    {"epoch": 0.11, "learning_rate": 9.02480752780154e-05, "loss": 2.5401, "theoretical_loss": 3.3187760104286976, "tokens_seen": 3023437824},
    {"epoch": 0.11, "learning_rate": 9.020530367835757e-05, "loss": 2.4571, "theoretical_loss": 3.3187649661011704, "tokens_seen": 3023568896},
    {"epoch": 0.11, "learning_rate": 9.016253207869975e-05, "loss": 2.362, "theoretical_loss": 3.3187539223864557, "tokens_seen": 3023699968},
    {"epoch": 0.11, "learning_rate": 9.011976047904193e-05, "loss": 2.4072, "theoretical_loss": 3.318742879284492, "tokens_seen": 3023831040},
    {"epoch": 0.11, "learning_rate": 9.00769888793841e-05, "loss": 2.3097, "theoretical_loss": 3.3187318367952194, "tokens_seen": 3023962112},
    {"epoch": 0.11, "learning_rate": 9.003421727972626e-05, "loss": 2.3313, "theoretical_loss": 3.318720794918577, "tokens_seen": 3024093184},
    {"epoch": 0.11, "learning_rate": 8.999144568006844e-05, "loss": 2.3635, "theoretical_loss": 3.3187097536545047, "tokens_seen": 3024224256},
    {"epoch": 0.11, "learning_rate": 8.994867408041062e-05, "loss": 2.4897, "theoretical_loss": 3.3186987130029415, "tokens_seen": 3024355328},
    {"epoch": 0.11, "learning_rate": 8.990590248075278e-05, "loss": 2.5518, "theoretical_loss": 3.3186876729638266, "tokens_seen": 3024486400},
    {"epoch": 0.11, "learning_rate": 8.986313088109496e-05, "loss": 2.4191, "theoretical_loss": 3.3186766335371005, "tokens_seen": 3024617472},
    {"epoch": 0.11, "learning_rate": 8.982035928143712e-05, "loss": 2.4865, "theoretical_loss": 3.318665594722702, "tokens_seen": 3024748544},
    {"epoch": 0.11, "objective/train/docs_used": 1658975, "objective/train/instantaneous_batch_size": 16, "objective/train/instantaneous_microbatch_size": 16384, "objective/train/original_loss": 2.6697089672088623, "objective/train/theoretical_loss": 3.3186600755451066, "objective/train/tokens_used": 54865376, "theoretical_loss": 3.3186600755451066, "tokens_seen": 3024814080},
    {"epoch": 0.11, "learning_rate": 8.97775876817793e-05, "loss": 2.4613, "theoretical_loss": 3.3186545565205705, "tokens_seen": 3024879616},
    {"epoch": 0.11, "learning_rate": 8.973481608212148e-05, "loss": 2.4463, "theoretical_loss": 3.318643518930646, "tokens_seen": 3025010688},
    {"epoch": 0.11, "learning_rate": 8.969204448246365e-05, "loss": 2.3875, "theoretical_loss": 3.3186324819528674, "tokens_seen": 3025141760},
    {"epoch": 0.11, "learning_rate": 8.964927288280582e-05, "loss": 2.4101, "theoretical_loss": 3.318621445587175, "tokens_seen": 3025272832},
    {"epoch": 0.11, "learning_rate": 8.960650128314799e-05, "loss": 2.3602, "theoretical_loss": 3.3186104098335076, "tokens_seen": 3025403904},
    {"epoch": 0.11, "learning_rate": 8.956372968349017e-05, "loss": 2.41, "theoretical_loss": 3.318599374691805, "tokens_seen": 3025534976},
    {"epoch": 0.11, "learning_rate": 8.952095808383235e-05, "loss": 2.4345, "theoretical_loss": 3.318588340162007, "tokens_seen": 3025666048},
    {"epoch": 0.11, "learning_rate": 8.947818648417451e-05, "loss": 2.5437, "theoretical_loss": 3.3185773062440527, "tokens_seen": 3025797120},
    {"epoch": 0.11, "learning_rate": 8.943541488451669e-05, "loss": 2.3191, "theoretical_loss": 3.3185662729378826, "tokens_seen": 3025928192},
    {"epoch": 0.12, "learning_rate": 8.939264328485885e-05, "loss": 2.3322, "theoretical_loss": 3.318555240243435, "tokens_seen": 3026059264},
    {"epoch": 0.12, "learning_rate": 8.934987168520103e-05, "loss": 2.3374, "theoretical_loss": 3.3185442081606507, "tokens_seen": 3026190336},
    {"epoch": 0.12, "learning_rate": 8.930710008554321e-05, "loss": 2.4206, "theoretical_loss": 3.3185331766894683, "tokens_seen": 3026321408},
    {"epoch": 0.12, "objective/train/docs_used": 1660130, "objective/train/instantaneous_batch_size": 16, "objective/train/instantaneous_microbatch_size": 16384, "objective/train/original_loss": 2.2490313053131104, "objective/train/theoretical_loss": 3.318522145829828, "objective/train/tokens_used": 56503776, "theoretical_loss": 3.318522145829828, "tokens_seen": 3026452480},
    {"epoch": 0.12, "learning_rate": 8.926432848588537e-05, "loss": 2.2883, "theoretical_loss": 3.318522145829828, "tokens_seen": 3026452480},
    {"epoch": 0.12, "learning_rate": 8.922155688622755e-05, "loss": 2.4436, "theoretical_loss": 3.318511115581669, "tokens_seen": 3026583552},
    {"epoch": 0.12, "learning_rate": 8.917878528656972e-05, "loss": 2.4165, "theoretical_loss": 3.3185000859449314, "tokens_seen": 3026714624},
    {"epoch": 0.12, "learning_rate": 8.91360136869119e-05, "loss": 2.3836, "theoretical_loss": 3.3184890569195544, "tokens_seen": 3026845696},
    {"epoch": 0.12, "learning_rate": 8.909324208725407e-05, "loss": 2.3455, "theoretical_loss": 3.3184780285054782, "tokens_seen": 3026976768},
    {"epoch": 0.12, "learning_rate": 8.905047048759624e-05, "loss": 2.4005, "theoretical_loss": 3.318467000702642, "tokens_seen": 3027107840},
    {"epoch": 0.12, "learning_rate": 8.900769888793842e-05, "loss": 2.425, "theoretical_loss": 3.3184559735109853, "tokens_seen": 3027238912},
    {"epoch": 0.12, "learning_rate": 8.896492728828058e-05, "loss": 2.4123, "theoretical_loss": 3.3184449469304482, "tokens_seen": 3027369984},
    {"epoch": 0.12, "learning_rate": 8.892215568862276e-05, "loss": 2.4926, "theoretical_loss": 3.3184339209609703, "tokens_seen": 3027501056},
    {"epoch": 0.12, "learning_rate": 8.887938408896494e-05, "loss": 2.2946, "theoretical_loss": 3.318422895602491, "tokens_seen": 3027632128},
    {"epoch": 0.12, "learning_rate": 8.88366124893071e-05, "loss": 2.2669, "theoretical_loss": 3.31841187085495, "tokens_seen": 3027763200},
    {"epoch": 0.12, "learning_rate": 8.879384088964928e-05, "loss": 2.2708, "theoretical_loss": 3.318400846718288, "tokens_seen": 3027894272},
    {"epoch": 0.12, "learning_rate": 8.875106928999145e-05, "loss": 2.3823, "theoretical_loss": 3.318389823192443, "tokens_seen": 3028025344},
    {"epoch": 0.12, "objective/train/docs_used": 1661412, "objective/train/instantaneous_batch_size": 16, "objective/train/instantaneous_microbatch_size": 16384, "objective/train/original_loss": 2.202181100845337, "objective/train/theoretical_loss": 3.3183843116585585, "objective/train/tokens_used": 58142176, "theoretical_loss": 3.3183843116585585, "tokens_seen": 3028090880},
    {"epoch": 0.12, "learning_rate": 8.870829769033362e-05, "loss": 2.4362, "theoretical_loss": 3.318378800277356, "tokens_seen": 3028156416},
    {"epoch": 0.12, "learning_rate": 8.86655260906758e-05, "loss": 2.3161, "theoretical_loss": 3.3183677779729663, "tokens_seen": 3028287488},
    {"epoch": 0.12, "learning_rate": 8.862275449101797e-05, "loss": 2.4133, "theoretical_loss": 3.3183567562792136, "tokens_seen": 3028418560},
    {"epoch": 0.12, "learning_rate": 8.857998289136013e-05, "loss": 2.464, "theoretical_loss": 3.3183457351960377, "tokens_seen": 3028549632},
    {"epoch": 0.12, "learning_rate": 8.853721129170231e-05, "loss": 2.4854, "theoretical_loss": 3.3183347147233784, "tokens_seen": 3028680704},
    {"epoch": 0.12, "learning_rate": 8.849443969204449e-05, "loss": 2.2685, "theoretical_loss": 3.3183236948611756, "tokens_seen": 3028811776},
    {"epoch": 0.12, "learning_rate": 8.845166809238667e-05, "loss": 2.1281, "theoretical_loss": 3.3183126756093686, "tokens_seen": 3028942848},
    {"epoch": 0.12, "learning_rate": 8.840889649272883e-05, "loss": 2.3911, "theoretical_loss": 3.318301656967898, "tokens_seen": 3029073920},
    {"epoch": 0.13, "learning_rate": 8.8366124893071e-05, "loss": 2.2723, "theoretical_loss": 3.3182906389367024, "tokens_seen": 3029204992},
    {"epoch": 0.13, "learning_rate": 8.832335329341318e-05, "loss": 2.2601, "theoretical_loss": 3.3182796215157224, "tokens_seen": 3029336064},
    {"epoch": 0.13, "learning_rate": 8.828058169375535e-05, "loss": 2.3874, "theoretical_loss": 3.318268604704898, "tokens_seen": 3029467136},
    {"epoch": 0.13, "learning_rate": 8.823781009409753e-05, "loss": 2.3443, "theoretical_loss": 3.318257588504168, "tokens_seen": 3029598208},
    {
"epoch": 0.13, |
|
"objective/train/docs_used": 1662079, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.219090700149536, |
|
"objective/train/theoretical_loss": 3.318246572913474, |
|
"objective/train/tokens_used": 59780576, |
|
"theoretical_loss": 3.318246572913474, |
|
"tokens_seen": 3029729280 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 8.81950384944397e-05, |
|
"loss": 2.4424, |
|
"theoretical_loss": 3.318246572913474, |
|
"tokens_seen": 3029729280 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 8.815226689478186e-05, |
|
"loss": 2.4172, |
|
"theoretical_loss": 3.318235557932754, |
|
"tokens_seen": 3029860352 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 8.810949529512404e-05, |
|
"loss": 2.2985, |
|
"theoretical_loss": 3.318224543561948, |
|
"tokens_seen": 3029991424 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 8.806672369546622e-05, |
|
"loss": 2.5111, |
|
"theoretical_loss": 3.3182135298009974, |
|
"tokens_seen": 3030122496 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 8.80239520958084e-05, |
|
"loss": 2.481, |
|
"theoretical_loss": 3.3182025166498406, |
|
"tokens_seen": 3030253568 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 8.798118049615056e-05, |
|
"loss": 2.2891, |
|
"theoretical_loss": 3.3181915041084182, |
|
"tokens_seen": 3030384640 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 8.793840889649273e-05, |
|
"loss": 2.2625, |
|
"theoretical_loss": 3.3181804921766695, |
|
"tokens_seen": 3030515712 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 8.78956372968349e-05, |
|
"loss": 2.6118, |
|
"theoretical_loss": 3.318169480854535, |
|
"tokens_seen": 3030646784 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 8.785286569717708e-05, |
|
"loss": 2.3715, |
|
"theoretical_loss": 3.318158470141954, |
|
"tokens_seen": 3030777856 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 8.781009409751925e-05, |
|
"loss": 2.4212, |
|
"theoretical_loss": 3.3181474600388667, |
|
"tokens_seen": 3030908928 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 8.776732249786143e-05, |
|
"loss": 2.1904, |
|
"theoretical_loss": 3.318136450545213, |
|
"tokens_seen": 3031040000 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 8.772455089820359e-05, |
|
"loss": 2.3964, |
|
"theoretical_loss": 3.318125441660933, |
|
"tokens_seen": 3031171072 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 8.768177929854577e-05, |
|
"loss": 2.2051, |
|
"theoretical_loss": 3.318114433385966, |
|
"tokens_seen": 3031302144 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"objective/train/docs_used": 1662642, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.347285270690918, |
|
"objective/train/theoretical_loss": 3.3181089294769563, |
|
"objective/train/tokens_used": 61418976, |
|
"theoretical_loss": 3.3181089294769563, |
|
"tokens_seen": 3031367680 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 8.763900769888795e-05, |
|
"loss": 2.4334, |
|
"theoretical_loss": 3.3181034257202526, |
|
"tokens_seen": 3031433216 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 8.759623609923011e-05, |
|
"loss": 2.3486, |
|
"theoretical_loss": 3.318092418663732, |
|
"tokens_seen": 3031564288 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 8.755346449957229e-05, |
|
"loss": 2.4626, |
|
"theoretical_loss": 3.3180814122163453, |
|
"tokens_seen": 3031695360 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 8.751069289991445e-05, |
|
"loss": 2.4253, |
|
"theoretical_loss": 3.3180704063780313, |
|
"tokens_seen": 3031826432 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 8.746792130025663e-05, |
|
"loss": 2.4214, |
|
"theoretical_loss": 3.318059401148731, |
|
"tokens_seen": 3031957504 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 8.742514970059881e-05, |
|
"loss": 2.536, |
|
"theoretical_loss": 3.3180483965283836, |
|
"tokens_seen": 3032088576 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 8.738237810094098e-05, |
|
"loss": 2.3038, |
|
"theoretical_loss": 3.318037392516929, |
|
"tokens_seen": 3032219648 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 8.733960650128315e-05, |
|
"loss": 2.3862, |
|
"theoretical_loss": 3.318026389114308, |
|
"tokens_seen": 3032350720 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 8.729683490162532e-05, |
|
"loss": 2.2624, |
|
"theoretical_loss": 3.3180153863204596, |
|
"tokens_seen": 3032481792 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 8.72540633019675e-05, |
|
"loss": 2.4936, |
|
"theoretical_loss": 3.3180043841353246, |
|
"tokens_seen": 3032612864 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 8.721129170230968e-05, |
|
"loss": 2.384, |
|
"theoretical_loss": 3.317993382558843, |
|
"tokens_seen": 3032743936 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 8.716852010265184e-05, |
|
"loss": 2.4542, |
|
"theoretical_loss": 3.317982381590954, |
|
"tokens_seen": 3032875008 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"objective/train/docs_used": 1663221, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.7515668869018555, |
|
"objective/train/theoretical_loss": 3.3179713812315983, |
|
"objective/train/tokens_used": 63057376, |
|
"theoretical_loss": 3.3179713812315983, |
|
"tokens_seen": 3033006080 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 8.712574850299402e-05, |
|
"loss": 2.4188, |
|
"theoretical_loss": 3.3179713812315983, |
|
"tokens_seen": 3033006080 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 8.708297690333618e-05, |
|
"loss": 2.5102, |
|
"theoretical_loss": 3.317960381480716, |
|
"tokens_seen": 3033137152 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 8.704020530367836e-05, |
|
"loss": 2.39, |
|
"theoretical_loss": 3.317949382338247, |
|
"tokens_seen": 3033268224 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 8.699743370402054e-05, |
|
"loss": 2.3996, |
|
"theoretical_loss": 3.3179383838041314, |
|
"tokens_seen": 3033399296 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 8.69546621043627e-05, |
|
"loss": 2.6354, |
|
"theoretical_loss": 3.317927385878309, |
|
"tokens_seen": 3033530368 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 8.691189050470488e-05, |
|
"loss": 2.4611, |
|
"theoretical_loss": 3.31791638856072, |
|
"tokens_seen": 3033661440 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 8.686911890504705e-05, |
|
"loss": 2.4918, |
|
"theoretical_loss": 3.317905391851305, |
|
"tokens_seen": 3033792512 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 8.682634730538923e-05, |
|
"loss": 2.4662, |
|
"theoretical_loss": 3.3178943957500033, |
|
"tokens_seen": 3033923584 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 8.67835757057314e-05, |
|
"loss": 2.5224, |
|
"theoretical_loss": 3.317883400256756, |
|
"tokens_seen": 3034054656 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 8.674080410607357e-05, |
|
"loss": 2.5573, |
|
"theoretical_loss": 3.3178724053715016, |
|
"tokens_seen": 3034185728 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 8.669803250641575e-05, |
|
"loss": 2.4771, |
|
"theoretical_loss": 3.3178614110941815, |
|
"tokens_seen": 3034316800 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 8.665526090675791e-05, |
|
"loss": 2.6235, |
|
"theoretical_loss": 3.3178504174247356, |
|
"tokens_seen": 3034447872 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 8.661248930710009e-05, |
|
"loss": 2.5638, |
|
"theoretical_loss": 3.317839424363104, |
|
"tokens_seen": 3034578944 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"objective/train/docs_used": 1664363, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.340308666229248, |
|
"objective/train/theoretical_loss": 3.3178339280602, |
|
"objective/train/tokens_used": 64695776, |
|
"theoretical_loss": 3.3178339280602, |
|
"tokens_seen": 3034644480 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 8.656971770744227e-05, |
|
"loss": 2.4057, |
|
"theoretical_loss": 3.317828431909227, |
|
"tokens_seen": 3034710016 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 8.652694610778443e-05, |
|
"loss": 2.3668, |
|
"theoretical_loss": 3.3178174400630445, |
|
"tokens_seen": 3034841088 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 8.64841745081266e-05, |
|
"loss": 2.616, |
|
"theoretical_loss": 3.3178064488244967, |
|
"tokens_seen": 3034972160 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 8.644140290846878e-05, |
|
"loss": 2.402, |
|
"theoretical_loss": 3.3177954581935234, |
|
"tokens_seen": 3035103232 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 8.639863130881095e-05, |
|
"loss": 2.3662, |
|
"theoretical_loss": 3.317784468170066, |
|
"tokens_seen": 3035234304 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 8.635585970915313e-05, |
|
"loss": 2.3539, |
|
"theoretical_loss": 3.317773478754063, |
|
"tokens_seen": 3035365376 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 8.63130881094953e-05, |
|
"loss": 2.4253, |
|
"theoretical_loss": 3.317762489945456, |
|
"tokens_seen": 3035496448 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 8.627031650983746e-05, |
|
"loss": 2.3829, |
|
"theoretical_loss": 3.3177515017441843, |
|
"tokens_seen": 3035627520 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 8.622754491017964e-05, |
|
"loss": 2.5298, |
|
"theoretical_loss": 3.3177405141501883, |
|
"tokens_seen": 3035758592 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 8.618477331052182e-05, |
|
"loss": 2.4475, |
|
"theoretical_loss": 3.317729527163409, |
|
"tokens_seen": 3035889664 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 8.6142001710864e-05, |
|
"loss": 2.7431, |
|
"theoretical_loss": 3.3177185407837855, |
|
"tokens_seen": 3036020736 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 8.609923011120616e-05, |
|
"loss": 2.4028, |
|
"theoretical_loss": 3.3177075550112587, |
|
"tokens_seen": 3036151808 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"objective/train/docs_used": 1664891, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.417335033416748, |
|
"objective/train/theoretical_loss": 3.3176965698457686, |
|
"objective/train/tokens_used": 66334176, |
|
"theoretical_loss": 3.3176965698457686, |
|
"tokens_seen": 3036282880 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 8.605645851154833e-05, |
|
"loss": 2.4356, |
|
"theoretical_loss": 3.3176965698457686, |
|
"tokens_seen": 3036282880 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 8.601368691189052e-05, |
|
"loss": 2.4589, |
|
"theoretical_loss": 3.3176855852872555, |
|
"tokens_seen": 3036413952 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 8.597091531223268e-05, |
|
"loss": 2.4168, |
|
"theoretical_loss": 3.3176746013356597, |
|
"tokens_seen": 3036545024 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 8.592814371257485e-05, |
|
"loss": 2.3732, |
|
"theoretical_loss": 3.317663617990922, |
|
"tokens_seen": 3036676096 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 8.588537211291703e-05, |
|
"loss": 2.3843, |
|
"theoretical_loss": 3.3176526352529816, |
|
"tokens_seen": 3036807168 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 8.584260051325919e-05, |
|
"loss": 2.3937, |
|
"theoretical_loss": 3.31764165312178, |
|
"tokens_seen": 3036938240 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 8.579982891360138e-05, |
|
"loss": 2.4782, |
|
"theoretical_loss": 3.3176306715972563, |
|
"tokens_seen": 3037069312 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 8.575705731394355e-05, |
|
"loss": 2.5745, |
|
"theoretical_loss": 3.3176196906793516, |
|
"tokens_seen": 3037200384 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 8.571428571428571e-05, |
|
"loss": 2.4332, |
|
"theoretical_loss": 3.3176087103680056, |
|
"tokens_seen": 3037331456 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 8.567151411462789e-05, |
|
"loss": 2.4773, |
|
"theoretical_loss": 3.3175977306631594, |
|
"tokens_seen": 3037462528 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 8.562874251497006e-05, |
|
"loss": 2.4218, |
|
"theoretical_loss": 3.3175867515647526, |
|
"tokens_seen": 3037593600 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 8.558597091531225e-05, |
|
"loss": 2.3285, |
|
"theoretical_loss": 3.317575773072726, |
|
"tokens_seen": 3037724672 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 8.554319931565441e-05, |
|
"loss": 2.2487, |
|
"theoretical_loss": 3.3175647951870197, |
|
"tokens_seen": 3037855744 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"objective/train/docs_used": 1665791, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.037510395050049, |
|
"objective/train/theoretical_loss": 3.317559306471518, |
|
"objective/train/tokens_used": 67972576, |
|
"theoretical_loss": 3.317559306471518, |
|
"tokens_seen": 3037921280 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 8.550042771599658e-05, |
|
"loss": 2.4374, |
|
"theoretical_loss": 3.3175538179075743, |
|
"tokens_seen": 3037986816 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 8.545765611633876e-05, |
|
"loss": 2.3843, |
|
"theoretical_loss": 3.31754284123433, |
|
"tokens_seen": 3038117888 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 8.541488451668092e-05, |
|
"loss": 2.3408, |
|
"theoretical_loss": 3.3175318651672274, |
|
"tokens_seen": 3038248960 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 8.537211291702311e-05, |
|
"loss": 2.45, |
|
"theoretical_loss": 3.3175208897062065, |
|
"tokens_seen": 3038380032 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 8.532934131736528e-05, |
|
"loss": 2.5611, |
|
"theoretical_loss": 3.317509914851208, |
|
"tokens_seen": 3038511104 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 8.528656971770744e-05, |
|
"loss": 2.2891, |
|
"theoretical_loss": 3.3174989406021718, |
|
"tokens_seen": 3038642176 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 8.524379811804962e-05, |
|
"loss": 2.5899, |
|
"theoretical_loss": 3.317487966959039, |
|
"tokens_seen": 3038773248 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 8.520102651839178e-05, |
|
"loss": 2.4568, |
|
"theoretical_loss": 3.3174769939217494, |
|
"tokens_seen": 3038904320 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 8.515825491873396e-05, |
|
"loss": 2.4892, |
|
"theoretical_loss": 3.317466021490244, |
|
"tokens_seen": 3039035392 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 8.511548331907614e-05, |
|
"loss": 2.6673, |
|
"theoretical_loss": 3.3174550496644626, |
|
"tokens_seen": 3039166464 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 8.50727117194183e-05, |
|
"loss": 2.4839, |
|
"theoretical_loss": 3.317444078444346, |
|
"tokens_seen": 3039297536 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 8.502994011976048e-05, |
|
"loss": 2.4649, |
|
"theoretical_loss": 3.317433107829835, |
|
"tokens_seen": 3039428608 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"objective/train/docs_used": 1666368, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.4885432720184326, |
|
"objective/train/theoretical_loss": 3.31742213782087, |
|
"objective/train/tokens_used": 69610976, |
|
"theoretical_loss": 3.31742213782087, |
|
"tokens_seen": 3039559680 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 8.498716852010266e-05, |
|
"loss": 2.5349, |
|
"theoretical_loss": 3.31742213782087, |
|
"tokens_seen": 3039559680 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 8.494439692044483e-05, |
|
"loss": 2.403, |
|
"theoretical_loss": 3.3174111684173906, |
|
"tokens_seen": 3039690752 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 8.4901625320787e-05, |
|
"loss": 2.5023, |
|
"theoretical_loss": 3.317400199619338, |
|
"tokens_seen": 3039821824 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 8.485885372112917e-05, |
|
"loss": 2.4205, |
|
"theoretical_loss": 3.3173892314266524, |
|
"tokens_seen": 3039952896 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 8.481608212147135e-05, |
|
"loss": 2.5295, |
|
"theoretical_loss": 3.3173782638392746, |
|
"tokens_seen": 3040083968 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 8.477331052181353e-05, |
|
"loss": 2.4369, |
|
"theoretical_loss": 3.3173672968571446, |
|
"tokens_seen": 3040215040 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 8.473053892215569e-05, |
|
"loss": 2.5302, |
|
"theoretical_loss": 3.3173563304802034, |
|
"tokens_seen": 3040346112 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 8.468776732249787e-05, |
|
"loss": 2.486, |
|
"theoretical_loss": 3.3173453647083915, |
|
"tokens_seen": 3040477184 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 8.464499572284003e-05, |
|
"loss": 2.511, |
|
"theoretical_loss": 3.317334399541649, |
|
"tokens_seen": 3040608256 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 8.460222412318221e-05, |
|
"loss": 2.6424, |
|
"theoretical_loss": 3.3173234349799166, |
|
"tokens_seen": 3040739328 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 8.455945252352439e-05, |
|
"loss": 2.4228, |
|
"theoretical_loss": 3.317312471023135, |
|
"tokens_seen": 3040870400 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 8.451668092386656e-05, |
|
"loss": 2.5719, |
|
"theoretical_loss": 3.3173015076712447, |
|
"tokens_seen": 3041001472 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 8.447390932420873e-05, |
|
"loss": 2.5819, |
|
"theoretical_loss": 3.3172905449241865, |
|
"tokens_seen": 3041132544 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"objective/train/docs_used": 1667402, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.5028133392333984, |
|
"objective/train/theoretical_loss": 3.317285063777451, |
|
"objective/train/tokens_used": 71249376, |
|
"theoretical_loss": 3.317285063777451, |
|
"tokens_seen": 3041198080 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 8.44311377245509e-05, |
|
"loss": 2.5006, |
|
"theoretical_loss": 3.3172795827819, |
|
"tokens_seen": 3041263616 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 8.438836612489306e-05, |
|
"loss": 2.4687, |
|
"theoretical_loss": 3.3172686212443274, |
|
"tokens_seen": 3041394688 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 8.434559452523526e-05, |
|
"loss": 2.5483, |
|
"theoretical_loss": 3.317257660311408, |
|
"tokens_seen": 3041525760 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 8.430282292557742e-05, |
|
"loss": 2.6292, |
|
"theoretical_loss": 3.3172466999830825, |
|
"tokens_seen": 3041656832 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 8.42600513259196e-05, |
|
"loss": 2.4604, |
|
"theoretical_loss": 3.317235740259292, |
|
"tokens_seen": 3041787904 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 8.421727972626176e-05, |
|
"loss": 2.4137, |
|
"theoretical_loss": 3.3172247811399767, |
|
"tokens_seen": 3041918976 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 8.417450812660394e-05, |
|
"loss": 2.5, |
|
"theoretical_loss": 3.317213822625077, |
|
"tokens_seen": 3042050048 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 8.413173652694612e-05, |
|
"loss": 2.5009, |
|
"theoretical_loss": 3.3172028647145346, |
|
"tokens_seen": 3042181120 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 8.408896492728828e-05, |
|
"loss": 2.4612, |
|
"theoretical_loss": 3.317191907408289, |
|
"tokens_seen": 3042312192 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 8.404619332763046e-05, |
|
"loss": 2.4446, |
|
"theoretical_loss": 3.3171809507062817, |
|
"tokens_seen": 3042443264 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 8.400342172797263e-05, |
|
"loss": 2.5055, |
|
"theoretical_loss": 3.3171699946084523, |
|
"tokens_seen": 3042574336 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 8.39606501283148e-05, |
|
"loss": 2.5237, |
|
"theoretical_loss": 3.3171590391147427, |
|
"tokens_seen": 3042705408 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"objective/train/docs_used": 1668521, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.3456153869628906, |
|
"objective/train/theoretical_loss": 3.3171480842250927, |
|
"objective/train/tokens_used": 72887776, |
|
"theoretical_loss": 3.3171480842250927, |
|
"tokens_seen": 3042836480 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 8.391787852865698e-05, |
|
"loss": 2.4685, |
|
"theoretical_loss": 3.3171480842250927, |
|
"tokens_seen": 3042836480 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 8.387510692899915e-05, |
|
"loss": 2.4987, |
|
"theoretical_loss": 3.317137129939443, |
|
"tokens_seen": 3042967552 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 8.383233532934131e-05, |
|
"loss": 2.4009, |
|
"theoretical_loss": 3.3171261762577346, |
|
"tokens_seen": 3043098624 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 8.378956372968349e-05, |
|
"loss": 2.4797, |
|
"theoretical_loss": 3.3171152231799086, |
|
"tokens_seen": 3043229696 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 8.374679213002567e-05, |
|
"loss": 2.4712, |
|
"theoretical_loss": 3.317104270705905, |
|
"tokens_seen": 3043360768 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 8.370402053036785e-05, |
|
"loss": 2.4712, |
|
"theoretical_loss": 3.3170933188356644, |
|
"tokens_seen": 3043491840 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 8.366124893071001e-05, |
|
"loss": 2.5555, |
|
"theoretical_loss": 3.3170823675691277, |
|
"tokens_seen": 3043622912 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 8.361847733105218e-05, |
|
"loss": 2.5155, |
|
"theoretical_loss": 3.317071416906236, |
|
"tokens_seen": 3043753984 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 8.357570573139436e-05, |
|
"loss": 2.4821, |
|
"theoretical_loss": 3.3170604668469297, |
|
"tokens_seen": 3043885056 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 8.353293413173653e-05, |
|
"loss": 2.5343, |
|
"theoretical_loss": 3.31704951739115, |
|
"tokens_seen": 3044016128 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 8.349016253207871e-05, |
|
"loss": 2.4523, |
|
"theoretical_loss": 3.317038568538837, |
|
"tokens_seen": 3044147200 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 8.344739093242088e-05, |
|
"loss": 2.3806, |
|
"theoretical_loss": 3.317027620289932, |
|
"tokens_seen": 3044278272 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 8.340461933276304e-05, |
|
"loss": 2.5725, |
|
"theoretical_loss": 3.317016672644375, |
|
"tokens_seen": 3044409344 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"objective/train/docs_used": 1668980, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.030937671661377, |
|
"objective/train/theoretical_loss": 3.3170111990478337, |
|
"objective/train/tokens_used": 74526176, |
|
"theoretical_loss": 3.3170111990478337, |
|
"tokens_seen": 3044474880 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 8.336184773310522e-05, |
|
"loss": 2.3872, |
|
"theoretical_loss": 3.3170057256021077, |
|
"tokens_seen": 3044540416 |
|
}, |
|
{ |
|
"epoch": 0.18, |
|
"learning_rate": 8.33190761334474e-05, |
|
"loss": 2.497, |
|
"theoretical_loss": 3.3169947791630703, |
|
"tokens_seen": 3044671488 |
|
}, |
|
{ |
|
"epoch": 0.18, |
|
"learning_rate": 8.327630453378958e-05, |
|
"loss": 2.5771, |
|
"theoretical_loss": 3.3169838333272037, |
|
"tokens_seen": 3044802560 |
|
}, |
|
{ |
|
"epoch": 0.18, |
|
"learning_rate": 8.323353293413174e-05, |
|
"loss": 2.4761, |
|
"theoretical_loss": 3.316972888094449, |
|
"tokens_seen": 3044933632 |
|
}, |
|
{ |
|
"epoch": 0.18, |
|
"learning_rate": 8.319076133447391e-05, |
|
"loss": 2.4855, |
|
"theoretical_loss": 3.3169619434647464, |
|
"tokens_seen": 3045064704 |
|
}, |
|
{ |
|
"epoch": 0.18, |
|
"learning_rate": 8.314798973481609e-05, |
|
"loss": 2.3585, |
|
"theoretical_loss": 3.3169509994380375, |
|
"tokens_seen": 3045195776 |
|
}, |
|
{ |
|
"epoch": 0.18, |
|
"learning_rate": 8.310521813515826e-05, |
|
"loss": 2.5985, |
|
"theoretical_loss": 3.3169400560142623, |
|
"tokens_seen": 3045326848 |
|
}, |
|
{ |
|
"epoch": 0.18, |
|
"learning_rate": 8.306244653550043e-05, |
|
"loss": 2.4823, |
|
"theoretical_loss": 3.3169291131933623, |
|
"tokens_seen": 3045457920 |
|
}, |
|
{ |
|
"epoch": 0.18, |
|
"learning_rate": 8.30196749358426e-05, |
|
"loss": 2.4651, |
|
"theoretical_loss": 3.316918170975278, |
|
"tokens_seen": 3045588992 |
|
}, |
|
{ |
|
"epoch": 0.18, |
|
"learning_rate": 8.297690333618477e-05, |
|
"loss": 2.4029, |
|
"theoretical_loss": 3.31690722935995, |
|
"tokens_seen": 3045720064 |
|
}, |
|
{ |
|
"epoch": 0.18, |
|
"learning_rate": 8.293413173652695e-05, |
|
"loss": 2.5912, |
|
"theoretical_loss": 3.3168962883473205, |
|
"tokens_seen": 3045851136 |
|
}, |
|
{ |
|
"epoch": 0.18, |
|
"learning_rate": 8.289136013686913e-05, |
|
"loss": 2.5166, |
|
"theoretical_loss": 3.316885347937329, |
|
"tokens_seen": 3045982208 |
|
}, |
|
{ |
|
"epoch": 0.18, |
|
"objective/train/docs_used": 1670028, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.2350833415985107, |
|
"objective/train/theoretical_loss": 3.316874408129916, |
|
"objective/train/tokens_used": 76164576, |
|
"theoretical_loss": 3.316874408129916, |
|
"tokens_seen": 3046113280 |
|
}, |
|
{ |
|
"epoch": 0.18, |
|
"learning_rate": 8.284858853721129e-05, |
|
"loss": 2.5141, |
|
"theoretical_loss": 3.316874408129916, |
|
"tokens_seen": 3046113280 |
|
}, |
|
{ |
|
"epoch": 0.18, |
|
"learning_rate": 8.280581693755347e-05, |
|
"loss": 2.441, |
|
"theoretical_loss": 3.316863468925024, |
|
"tokens_seen": 3046244352 |
|
}, |
|
{ |
|
"epoch": 0.18, |
|
"learning_rate": 8.276304533789564e-05, |
|
"loss": 2.5365, |
|
"theoretical_loss": 3.3168525303225924, |
|
"tokens_seen": 3046375424 |
|
}, |
|
{ |
|
"epoch": 0.18, |
|
"learning_rate": 8.272027373823781e-05, |
|
"loss": 2.6514, |
|
"theoretical_loss": 3.316841592322563, |
|
"tokens_seen": 3046506496 |
|
}, |
|
{ |
|
"epoch": 0.18, |
|
"learning_rate": 8.267750213857999e-05, |
|
"loss": 2.6148, |
|
"theoretical_loss": 3.3168306549248765, |
|
"tokens_seen": 3046637568 |
|
}, |
|
{ |
|
"epoch": 0.18, |
|
"learning_rate": 8.263473053892216e-05, |
|
"loss": 2.4865, |
|
"theoretical_loss": 3.316819718129474, |
|
"tokens_seen": 3046768640 |
|
}, |
|
{ |
|
"epoch": 0.18, |
|
"learning_rate": 8.259195893926434e-05, |
|
"loss": 2.5057, |
|
"theoretical_loss": 3.3168087819362957, |
|
"tokens_seen": 3046899712 |
|
}, |
|
{ |
|
"epoch": 0.18, |
|
"learning_rate": 8.25491873396065e-05, |
|
"loss": 2.4055, |
|
"theoretical_loss": 3.316797846345283, |
|
"tokens_seen": 3047030784 |
|
}, |
|
{ |
|
"epoch": 0.18, |
|
"learning_rate": 8.250641573994868e-05, |
|
"loss": 2.5902, |
|
"theoretical_loss": 3.316786911356377, |
|
"tokens_seen": 3047161856 |
|
}, |
|
{ |
|
"epoch": 0.18, |
|
"learning_rate": 8.246364414029086e-05, |
|
"loss": 2.3611, |
|
"theoretical_loss": 3.316775976969519, |
|
"tokens_seen": 3047292928 |
|
}, |
|
{ |
|
"epoch": 0.18, |
|
"learning_rate": 8.242087254063302e-05, |
|
"loss": 2.5959, |
|
"theoretical_loss": 3.316765043184649, |
|
"tokens_seen": 3047424000 |
|
}, |
|
{ |
|
"epoch": 0.18, |
|
"learning_rate": 8.23781009409752e-05, |
|
"loss": 2.5646, |
|
"theoretical_loss": 3.316754110001708, |
|
"tokens_seen": 3047555072 |
|
}, |
|
{ |
|
"epoch": 0.19, |
|
"learning_rate": 8.233532934131736e-05, |
|
"loss": 2.585, |
|
"theoretical_loss": 3.3167431774206384, |
|
"tokens_seen": 3047686144 |
|
}, |
|
{ |
|
"epoch": 0.19, |
|
"objective/train/docs_used": 1670628, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 1.981849193572998, |
|
"objective/train/theoretical_loss": 3.316737711355786, |
|
"objective/train/tokens_used": 77802976, |
|
"theoretical_loss": 3.316737711355786, |
|
"tokens_seen": 3047751680 |
|
}, |
|
{ |
|
"epoch": 0.19, |
|
"learning_rate": 8.229255774165954e-05, |
|
"loss": 2.3982, |
|
"theoretical_loss": 3.3167322454413792, |
|
"tokens_seen": 3047817216 |
|
}, |
|
{ |
|
"epoch": 0.19, |
|
"learning_rate": 8.224978614200172e-05, |
|
"loss": 2.5217, |
|
"theoretical_loss": 3.316721314063873, |
|
"tokens_seen": 3047948288 |
|
}, |
|
{ |
|
"epoch": 0.19, |
|
"learning_rate": 8.220701454234389e-05, |
|
"loss": 2.6089, |
|
"theoretical_loss": 3.3167103832880604, |
|
"tokens_seen": 3048079360 |
|
}, |
|
{ |
|
"epoch": 0.19, |
|
"learning_rate": 8.216424294268606e-05, |
|
"loss": 2.529, |
|
"theoretical_loss": 3.316699453113882, |
|
"tokens_seen": 3048210432 |
|
}, |
|
{ |
|
"epoch": 0.19, |
|
"learning_rate": 8.212147134302823e-05, |
|
"loss": 2.4732, |
|
"theoretical_loss": 3.3166885235412784, |
|
"tokens_seen": 3048341504 |
|
}, |
|
{ |
|
"epoch": 0.19, |
|
"learning_rate": 8.207869974337041e-05, |
|
"loss": 2.4514, |
|
"theoretical_loss": 3.316677594570192, |
|
"tokens_seen": 3048472576 |
|
}, |
|
{ |
|
"epoch": 0.19, |
|
"learning_rate": 8.203592814371259e-05, |
|
"loss": 2.4329, |
|
"theoretical_loss": 3.316666666200563, |
|
"tokens_seen": 3048603648 |
|
}, |
|
{ |
|
"epoch": 0.19, |
|
"learning_rate": 8.199315654405475e-05, |
|
"loss": 2.5147, |
|
"theoretical_loss": 3.316655738432332, |
|
"tokens_seen": 3048734720 |
|
}, |
|
{ |
|
"epoch": 0.19, |
|
"learning_rate": 8.195038494439693e-05, |
|
"loss": 2.4813, |
|
"theoretical_loss": 3.3166448112654408, |
|
"tokens_seen": 3048865792 |
|
}, |
|
{ |
|
"epoch": 0.19, |
|
"learning_rate": 8.190761334473909e-05, |
|
"loss": 2.4503, |
|
"theoretical_loss": 3.3166338846998302, |
|
"tokens_seen": 3048996864 |
|
}, |
|
{ |
|
"epoch": 0.19, |
|
"learning_rate": 8.186484174508127e-05, |
|
"loss": 2.5814, |
|
"theoretical_loss": 3.316622958735442, |
|
"tokens_seen": 3049127936 |
|
}, |
|
{ |
|
"epoch": 0.19, |
|
"learning_rate": 8.182207014542345e-05, |
|
"loss": 2.5462, |
|
"theoretical_loss": 3.3166120333722158, |
|
"tokens_seen": 3049259008 |
|
}, |
|
{ |
|
"epoch": 0.19, |
|
"objective/train/docs_used": 1671752, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.1045682430267334, |
|
"objective/train/theoretical_loss": 3.3166011086100937, |
|
"objective/train/tokens_used": 79441376, |
|
"theoretical_loss": 3.3166011086100937, |
|
"tokens_seen": 3049390080 |
|
}, |
|
{ |
|
"epoch": 0.19, |
|
"learning_rate": 8.177929854576561e-05, |
|
"loss": 2.5946, |
|
"theoretical_loss": 3.3166011086100937, |
|
"tokens_seen": 3049390080 |
|
}, |
|
{ |
|
"epoch": 0.19, |
|
"learning_rate": 8.173652694610778e-05, |
|
"loss": 2.4925, |
|
"theoretical_loss": 3.3165901844490167, |
|
"tokens_seen": 3049521152 |
|
}, |
|
{ |
|
"epoch": 0.19, |
|
"learning_rate": 8.169375534644996e-05, |
|
"loss": 2.4937, |
|
"theoretical_loss": 3.3165792608889255, |
|
"tokens_seen": 3049652224 |
|
}, |
|
{ |
|
"epoch": 0.19, |
|
"learning_rate": 8.165098374679214e-05, |
|
"loss": 2.5232, |
|
"theoretical_loss": 3.3165683379297612, |
|
"tokens_seen": 3049783296 |
|
}, |
|
{ |
|
"epoch": 0.19, |
|
"learning_rate": 8.160821214713431e-05, |
|
"loss": 2.4577, |
|
"theoretical_loss": 3.3165574155714657, |
|
"tokens_seen": 3049914368 |
|
}, |
|
{ |
|
"epoch": 0.19, |
|
"learning_rate": 8.156544054747648e-05, |
|
"loss": 2.5193, |
|
"theoretical_loss": 3.3165464938139797, |
|
"tokens_seen": 3050045440 |
|
}, |
|
{ |
|
"epoch": 0.19, |
|
"learning_rate": 8.152266894781864e-05, |
|
"loss": 2.4776, |
|
"theoretical_loss": 3.3165355726572434, |
|
"tokens_seen": 3050176512 |
|
}, |
|
{ |
|
"epoch": 0.19, |
|
"learning_rate": 8.147989734816082e-05, |
|
"loss": 2.4092, |
|
"theoretical_loss": 3.3165246521011995, |
|
"tokens_seen": 3050307584 |
|
}, |
|
{ |
|
"epoch": 0.19, |
|
"learning_rate": 8.1437125748503e-05, |
|
"loss": 2.3898, |
|
"theoretical_loss": 3.3165137321457885, |
|
"tokens_seen": 3050438656 |
|
}, |
|
{ |
|
"epoch": 0.19, |
|
"learning_rate": 8.139435414884518e-05, |
|
"loss": 2.4147, |
|
"theoretical_loss": 3.3165028127909513, |
|
"tokens_seen": 3050569728 |
|
}, |
|
{ |
|
"epoch": 0.19, |
|
"learning_rate": 8.135158254918734e-05, |
|
"loss": 2.559, |
|
"theoretical_loss": 3.3164918940366293, |
|
"tokens_seen": 3050700800 |
|
}, |
|
{ |
|
"epoch": 0.2, |
|
"learning_rate": 8.130881094952951e-05, |
|
"loss": 2.4779, |
|
"theoretical_loss": 3.3164809758827634, |
|
"tokens_seen": 3050831872 |
|
}, |
|
{ |
|
"epoch": 0.2, |
|
"learning_rate": 8.126603934987169e-05, |
|
"loss": 2.5393, |
|
"theoretical_loss": 3.3164700583292954, |
|
"tokens_seen": 3050962944 |
|
}, |
|
{ |
|
"epoch": 0.2, |
|
"objective/train/docs_used": 1672176, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.2255687713623047, |
|
"objective/train/theoretical_loss": 3.316464599777692, |
|
"objective/train/tokens_used": 81079776, |
|
"theoretical_loss": 3.316464599777692, |
|
"tokens_seen": 3051028480 |
|
}, |
|
{ |
|
"epoch": 0.2, |
|
"learning_rate": 8.122326775021386e-05, |
|
"loss": 2.4892, |
|
"theoretical_loss": 3.316459141376166, |
|
"tokens_seen": 3051094016 |
|
}, |
|
{ |
|
"epoch": 0.2, |
|
"learning_rate": 8.118049615055604e-05, |
|
"loss": 2.4738, |
|
"theoretical_loss": 3.3164482250233163, |
|
"tokens_seen": 3051225088 |
|
}, |
|
{ |
|
"epoch": 0.2, |
|
"learning_rate": 8.113772455089821e-05, |
|
"loss": 2.5011, |
|
"theoretical_loss": 3.316437309270688, |
|
"tokens_seen": 3051356160 |
|
}, |
|
{ |
|
"epoch": 0.2, |
|
"learning_rate": 8.109495295124037e-05, |
|
"loss": 2.4879, |
|
"theoretical_loss": 3.316426394118222, |
|
"tokens_seen": 3051487232 |
|
}, |
|
{ |
|
"epoch": 0.2, |
|
"learning_rate": 8.105218135158255e-05, |
|
"loss": 2.441, |
|
"theoretical_loss": 3.316415479565859, |
|
"tokens_seen": 3051618304 |
|
}, |
|
{ |
|
"epoch": 0.2, |
|
"learning_rate": 8.100940975192473e-05, |
|
"loss": 2.5491, |
|
"theoretical_loss": 3.3164045656135417, |
|
"tokens_seen": 3051749376 |
|
}, |
|
{ |
|
"epoch": 0.2, |
|
"learning_rate": 8.09666381522669e-05, |
|
"loss": 2.502, |
|
"theoretical_loss": 3.3163936522612096, |
|
"tokens_seen": 3051880448 |
|
}, |
|
{ |
|
"epoch": 0.2, |
|
"learning_rate": 8.092386655260907e-05, |
|
"loss": 2.3763, |
|
"theoretical_loss": 3.3163827395088052, |
|
"tokens_seen": 3052011520 |
|
}, |
|
{ |
|
"epoch": 0.2, |
|
"learning_rate": 8.088109495295124e-05, |
|
"loss": 2.4419, |
|
"theoretical_loss": 3.3163718273562695, |
|
"tokens_seen": 3052142592 |
|
}, |
|
{ |
|
"epoch": 0.2, |
|
"learning_rate": 8.083832335329341e-05, |
|
"loss": 2.4011, |
|
"theoretical_loss": 3.3163609158035436, |
|
"tokens_seen": 3052273664 |
|
}, |
|
{ |
|
"epoch": 0.2, |
|
"learning_rate": 8.07955517536356e-05, |
|
"loss": 2.4094, |
|
"theoretical_loss": 3.3163500048505687, |
|
"tokens_seen": 3052404736 |
|
}, |
|
{ |
|
"epoch": 0.2, |
|
"learning_rate": 8.075278015397776e-05, |
|
"loss": 2.4447, |
|
"theoretical_loss": 3.3163390944972857, |
|
"tokens_seen": 3052535808 |
|
}, |
|
{ |
|
"epoch": 0.2, |
|
"objective/train/docs_used": 1673386, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.077202320098877, |
|
"objective/train/theoretical_loss": 3.316328184743637, |
|
"objective/train/tokens_used": 82718176, |
|
"theoretical_loss": 3.316328184743637, |
|
"tokens_seen": 3052666880 |
|
}, |
|
{ |
|
"epoch": 0.2, |
|
"learning_rate": 8.071000855431994e-05, |
|
"loss": 2.3368, |
|
"theoretical_loss": 3.316328184743637, |
|
"tokens_seen": 3052666880 |
|
}, |
|
{ |
|
"epoch": 0.2, |
|
"learning_rate": 8.06672369546621e-05, |
|
"loss": 2.578, |
|
"theoretical_loss": 3.3163172755895634, |
|
"tokens_seen": 3052797952 |
|
}, |
|
{ |
|
"epoch": 0.2, |
|
"learning_rate": 8.062446535500429e-05, |
|
"loss": 2.5108, |
|
"theoretical_loss": 3.3163063670350055, |
|
"tokens_seen": 3052929024 |
|
}, |
|
{ |
|
"epoch": 0.2, |
|
"learning_rate": 8.058169375534646e-05, |
|
"loss": 2.4558, |
|
"theoretical_loss": 3.3162954590799054, |
|
"tokens_seen": 3053060096 |
|
}, |
|
{ |
|
"epoch": 0.2, |
|
"learning_rate": 8.053892215568862e-05, |
|
"loss": 2.5339, |
|
"theoretical_loss": 3.316284551724204, |
|
"tokens_seen": 3053191168 |
|
}, |
|
{ |
|
"epoch": 0.2, |
|
"learning_rate": 8.04961505560308e-05, |
|
"loss": 2.3677, |
|
"theoretical_loss": 3.3162736449678434, |
|
"tokens_seen": 3053322240 |
|
}, |
|
{ |
|
"epoch": 0.2, |
|
"learning_rate": 8.045337895637297e-05, |
|
"loss": 2.5638, |
|
"theoretical_loss": 3.3162627388107637, |
|
"tokens_seen": 3053453312 |
|
}, |
|
{ |
|
"epoch": 0.2, |
|
"learning_rate": 8.041060735671514e-05, |
|
"loss": 2.4784, |
|
"theoretical_loss": 3.316251833252908, |
|
"tokens_seen": 3053584384 |
|
}, |
|
{ |
|
"epoch": 0.2, |
|
"learning_rate": 8.036783575705732e-05, |
|
"loss": 2.486, |
|
"theoretical_loss": 3.3162409282942154, |
|
"tokens_seen": 3053715456 |
|
}, |
|
{ |
|
"epoch": 0.2, |
|
"learning_rate": 8.032506415739949e-05, |
|
"loss": 2.4993, |
|
"theoretical_loss": 3.316230023934629, |
|
"tokens_seen": 3053846528 |
|
}, |
|
{ |
|
"epoch": 0.21, |
|
"learning_rate": 8.028229255774167e-05, |
|
"loss": 2.5384, |
|
"theoretical_loss": 3.3162191201740896, |
|
"tokens_seen": 3053977600 |
|
}, |
|
{ |
|
"epoch": 0.21, |
|
"learning_rate": 8.023952095808383e-05, |
|
"loss": 2.5113, |
|
"theoretical_loss": 3.3162082170125387, |
|
"tokens_seen": 3054108672 |
|
}, |
|
{ |
|
"epoch": 0.21, |
|
"learning_rate": 8.019674935842601e-05, |
|
"loss": 2.2871, |
|
"theoretical_loss": 3.3161973144499175, |
|
"tokens_seen": 3054239744 |
|
}, |
|
{ |
|
"epoch": 0.21, |
|
"objective/train/docs_used": 1673816, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.4686410427093506, |
|
"objective/train/theoretical_loss": 3.316191863393187, |
|
"objective/train/tokens_used": 84356576, |
|
"theoretical_loss": 3.316191863393187, |
|
"tokens_seen": 3054305280 |
|
}, |
|
{ |
|
"epoch": 0.21, |
|
"learning_rate": 8.015397775876819e-05, |
|
"loss": 2.4141, |
|
"theoretical_loss": 3.3161864124861675, |
|
"tokens_seen": 3054370816 |
|
}, |
|
{ |
|
"epoch": 0.21, |
|
"learning_rate": 8.011120615911035e-05, |
|
"loss": 2.5008, |
|
"theoretical_loss": 3.31617551112123, |
|
"tokens_seen": 3054501888 |
|
}, |
|
{ |
|
"epoch": 0.21, |
|
"learning_rate": 8.006843455945253e-05, |
|
"loss": 2.5764, |
|
"theoretical_loss": 3.316164610355047, |
|
"tokens_seen": 3054632960 |
|
}, |
|
{ |
|
"epoch": 0.21, |
|
"learning_rate": 8.00256629597947e-05, |
|
"loss": 2.4664, |
|
"theoretical_loss": 3.316153710187559, |
|
"tokens_seen": 3054764032 |
|
}, |
|
{ |
|
"epoch": 0.21, |
|
"learning_rate": 7.998289136013687e-05, |
|
"loss": 2.5636, |
|
"theoretical_loss": 3.316142810618708, |
|
"tokens_seen": 3054895104 |
|
}, |
|
{ |
|
"epoch": 0.21, |
|
"learning_rate": 7.994011976047905e-05, |
|
"loss": 2.54, |
|
"theoretical_loss": 3.3161319116484353, |
|
"tokens_seen": 3055026176 |
|
}, |
|
{ |
|
"epoch": 0.21, |
|
"learning_rate": 7.989734816082122e-05, |
|
"loss": 2.6878, |
|
"theoretical_loss": 3.3161210132766823, |
|
"tokens_seen": 3055157248 |
|
}, |
|
{ |
|
"epoch": 0.21, |
|
"learning_rate": 7.98545765611634e-05, |
|
"loss": 2.6299, |
|
"theoretical_loss": 3.316110115503391, |
|
"tokens_seen": 3055288320 |
|
}, |
|
{ |
|
"epoch": 0.21, |
|
"learning_rate": 7.981180496150556e-05, |
|
"loss": 2.476, |
|
"theoretical_loss": 3.316099218328502, |
|
"tokens_seen": 3055419392 |
|
}, |
|
{ |
|
"epoch": 0.21, |
|
"learning_rate": 7.976903336184774e-05, |
|
"loss": 2.5998, |
|
"theoretical_loss": 3.3160883217519572, |
|
"tokens_seen": 3055550464 |
|
}, |
|
{ |
|
"epoch": 0.21, |
|
"learning_rate": 7.972626176218992e-05, |
|
"loss": 2.4002, |
|
"theoretical_loss": 3.316077425773698, |
|
"tokens_seen": 3055681536 |
|
}, |
|
{ |
|
"epoch": 0.21, |
|
"learning_rate": 7.968349016253208e-05, |
|
"loss": 2.5102, |
|
"theoretical_loss": 3.316066530393666, |
|
"tokens_seen": 3055812608 |
|
}, |
|
{ |
|
"epoch": 0.21, |
|
"objective/train/docs_used": 1674780, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.8518364429473877, |
|
"objective/train/theoretical_loss": 3.3160556356118027, |
|
"objective/train/tokens_used": 85994976, |
|
"theoretical_loss": 3.3160556356118027, |
|
"tokens_seen": 3055943680 |
|
}, |
|
{ |
|
"epoch": 0.21, |
|
"learning_rate": 7.964071856287424e-05, |
|
"loss": 2.5794, |
|
"theoretical_loss": 3.3160556356118027, |
|
"tokens_seen": 3055943680 |
|
}, |
|
{ |
|
"epoch": 0.21, |
|
"learning_rate": 7.959794696321644e-05, |
|
"loss": 2.6511, |
|
"theoretical_loss": 3.3160447414280494, |
|
"tokens_seen": 3056074752 |
|
}, |
|
{ |
|
"epoch": 0.21, |
|
"learning_rate": 7.95551753635586e-05, |
|
"loss": 2.4842, |
|
"theoretical_loss": 3.316033847842348, |
|
"tokens_seen": 3056205824 |
|
}, |
|
{ |
|
"epoch": 0.21, |
|
"learning_rate": 7.951240376390078e-05, |
|
"loss": 2.4747, |
|
"theoretical_loss": 3.3160229548546396, |
|
"tokens_seen": 3056336896 |
|
}, |
|
{ |
|
"epoch": 0.21, |
|
"learning_rate": 7.946963216424294e-05, |
|
"loss": 2.4766, |
|
"theoretical_loss": 3.316012062464866, |
|
"tokens_seen": 3056467968 |
|
}, |
|
{ |
|
"epoch": 0.21, |
|
"learning_rate": 7.942686056458511e-05, |
|
"loss": 2.611, |
|
"theoretical_loss": 3.316001170672968, |
|
"tokens_seen": 3056599040 |
|
}, |
|
{ |
|
"epoch": 0.21, |
|
"learning_rate": 7.93840889649273e-05, |
|
"loss": 2.4415, |
|
"theoretical_loss": 3.3159902794788887, |
|
"tokens_seen": 3056730112 |
|
}, |
|
{ |
|
"epoch": 0.21, |
|
"learning_rate": 7.934131736526947e-05, |
|
"loss": 2.4817, |
|
"theoretical_loss": 3.3159793888825684, |
|
"tokens_seen": 3056861184 |
|
}, |
|
{ |
|
"epoch": 0.22, |
|
"learning_rate": 7.929854576561164e-05, |
|
"loss": 2.5289, |
|
"theoretical_loss": 3.315968498883949, |
|
"tokens_seen": 3056992256 |
|
}, |
|
{ |
|
"epoch": 0.22, |
|
"learning_rate": 7.925577416595381e-05, |
|
"loss": 2.4786, |
|
"theoretical_loss": 3.3159576094829726, |
|
"tokens_seen": 3057123328 |
|
}, |
|
{ |
|
"epoch": 0.22, |
|
"learning_rate": 7.921300256629597e-05, |
|
"loss": 2.6726, |
|
"theoretical_loss": 3.3159467206795794, |
|
"tokens_seen": 3057254400 |
|
}, |
|
{ |
|
"epoch": 0.22, |
|
"learning_rate": 7.917023096663817e-05, |
|
"loss": 2.4774, |
|
"theoretical_loss": 3.3159358324737123, |
|
"tokens_seen": 3057385472 |
|
}, |
|
{ |
|
"epoch": 0.22, |
|
"learning_rate": 7.912745936698033e-05, |
|
"loss": 2.3915, |
|
"theoretical_loss": 3.3159249448653125, |
|
"tokens_seen": 3057516544 |
|
}, |
|
{ |
|
"epoch": 0.22, |
|
"objective/train/docs_used": 1675938, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.4315812587738037, |
|
"objective/train/theoretical_loss": 3.3159195012851446, |
|
"objective/train/tokens_used": 87633376, |
|
"theoretical_loss": 3.3159195012851446, |
|
"tokens_seen": 3057582080 |
|
}, |
|
{ |
|
"epoch": 0.22, |
|
"learning_rate": 7.90846877673225e-05, |
|
"loss": 2.4585, |
|
"theoretical_loss": 3.3159140578543216, |
|
"tokens_seen": 3057647616 |
|
}, |
|
{ |
|
"epoch": 0.22, |
|
"learning_rate": 7.904191616766467e-05, |
|
"loss": 2.474, |
|
"theoretical_loss": 3.315903171440681, |
|
"tokens_seen": 3057778688 |
|
}, |
|
{ |
|
"epoch": 0.22, |
|
"learning_rate": 7.899914456800684e-05, |
|
"loss": 2.3614, |
|
"theoretical_loss": 3.3158922856243325, |
|
"tokens_seen": 3057909760 |
|
}, |
|
{ |
|
"epoch": 0.22, |
|
"learning_rate": 7.895637296834903e-05, |
|
"loss": 2.4224, |
|
"theoretical_loss": 3.3158814004052175, |
|
"tokens_seen": 3058040832 |
|
}, |
|
{ |
|
"epoch": 0.22, |
|
"learning_rate": 7.89136013686912e-05, |
|
"loss": 2.5027, |
|
"theoretical_loss": 3.3158705157832786, |
|
"tokens_seen": 3058171904 |
|
}, |
|
{ |
|
"epoch": 0.22, |
|
"learning_rate": 7.887082976903336e-05, |
|
"loss": 2.4609, |
|
"theoretical_loss": 3.315859631758456, |
|
"tokens_seen": 3058302976 |
|
}, |
|
{ |
|
"epoch": 0.22, |
|
"learning_rate": 7.882805816937554e-05, |
|
"loss": 2.5555, |
|
"theoretical_loss": 3.3158487483306924, |
|
"tokens_seen": 3058434048 |
|
}, |
|
{ |
|
"epoch": 0.22, |
|
"learning_rate": 7.878528656971772e-05, |
|
"loss": 2.3801, |
|
"theoretical_loss": 3.3158378654999288, |
|
"tokens_seen": 3058565120 |
|
}, |
|
{ |
|
"epoch": 0.22, |
|
"learning_rate": 7.87425149700599e-05, |
|
"loss": 2.491, |
|
"theoretical_loss": 3.315826983266107, |
|
"tokens_seen": 3058696192 |
|
}, |
|
{ |
|
"epoch": 0.22, |
|
"learning_rate": 7.869974337040206e-05, |
|
"loss": 2.5554, |
|
"theoretical_loss": 3.31581610162917, |
|
"tokens_seen": 3058827264 |
|
}, |
|
{ |
|
"epoch": 0.22, |
|
"learning_rate": 7.865697177074422e-05, |
|
"loss": 2.4215, |
|
"theoretical_loss": 3.3158052205890574, |
|
"tokens_seen": 3058958336 |
|
}, |
|
{ |
|
"epoch": 0.22, |
|
"learning_rate": 7.86142001710864e-05, |
|
"loss": 2.4309, |
|
"theoretical_loss": 3.315794340145712, |
|
"tokens_seen": 3059089408 |
|
}, |
|
{ |
|
"epoch": 0.22, |
|
"objective/train/docs_used": 1676633, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.8794491291046143, |
|
"objective/train/theoretical_loss": 3.3157834602990754, |
|
"objective/train/tokens_used": 89271776, |
|
"theoretical_loss": 3.3157834602990754, |
|
"tokens_seen": 3059220480 |
|
}, |
|
{ |
|
"epoch": 0.22, |
|
"learning_rate": 7.857142857142858e-05, |
|
"loss": 2.507, |
|
"theoretical_loss": 3.3157834602990754, |
|
"tokens_seen": 3059220480 |
|
}, |
|
{ |
|
"epoch": 0.22, |
|
"learning_rate": 7.852865697177076e-05, |
|
"loss": 2.4838, |
|
"theoretical_loss": 3.3157725810490892, |
|
"tokens_seen": 3059351552 |
|
}, |
|
{ |
|
"epoch": 0.22, |
|
"learning_rate": 7.848588537211292e-05, |
|
"loss": 2.3672, |
|
"theoretical_loss": 3.3157617023956956, |
|
"tokens_seen": 3059482624 |
|
}, |
|
{ |
|
"epoch": 0.22, |
|
"learning_rate": 7.844311377245509e-05, |
|
"loss": 2.5403, |
|
"theoretical_loss": 3.3157508243388354, |
|
"tokens_seen": 3059613696 |
|
}, |
|
{ |
|
"epoch": 0.22, |
|
"learning_rate": 7.840034217279727e-05, |
|
"loss": 2.3567, |
|
"theoretical_loss": 3.315739946878451, |
|
"tokens_seen": 3059744768 |
|
}, |
|
{ |
|
"epoch": 0.22, |
|
"learning_rate": 7.835757057313944e-05, |
|
"loss": 2.5668, |
|
"theoretical_loss": 3.3157290700144837, |
|
"tokens_seen": 3059875840 |
|
}, |
|
{ |
|
"epoch": 0.22, |
|
"learning_rate": 7.831479897348161e-05, |
|
"loss": 2.6316, |
|
"theoretical_loss": 3.315718193746876, |
|
"tokens_seen": 3060006912 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"learning_rate": 7.827202737382379e-05, |
|
"loss": 2.6666, |
|
"theoretical_loss": 3.315707318075569, |
|
"tokens_seen": 3060137984 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"learning_rate": 7.822925577416595e-05, |
|
"loss": 2.4661, |
|
"theoretical_loss": 3.3156964430005047, |
|
"tokens_seen": 3060269056 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"learning_rate": 7.818648417450813e-05, |
|
"loss": 2.5179, |
|
"theoretical_loss": 3.315685568521625, |
|
"tokens_seen": 3060400128 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"learning_rate": 7.814371257485031e-05, |
|
"loss": 2.5853, |
|
"theoretical_loss": 3.3156746946388713, |
|
"tokens_seen": 3060531200 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"learning_rate": 7.810094097519247e-05, |
|
"loss": 2.5132, |
|
"theoretical_loss": 3.315663821352186, |
|
"tokens_seen": 3060662272 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"learning_rate": 7.805816937553465e-05, |
|
"loss": 2.6414, |
|
"theoretical_loss": 3.3156529486615103, |
|
"tokens_seen": 3060793344 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"objective/train/docs_used": 1677094, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.3754663467407227, |
|
"objective/train/theoretical_loss": 3.315647512539658, |
|
"objective/train/tokens_used": 90910176, |
|
"theoretical_loss": 3.315647512539658, |
|
"tokens_seen": 3060858880 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"learning_rate": 7.801539777587682e-05, |
|
"loss": 2.6056, |
|
"theoretical_loss": 3.3156420765667862, |
|
"tokens_seen": 3060924416 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"learning_rate": 7.7972626176219e-05, |
|
"loss": 2.5379, |
|
"theoretical_loss": 3.3156312050679553, |
|
"tokens_seen": 3061055488 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"learning_rate": 7.792985457656117e-05, |
|
"loss": 2.547, |
|
"theoretical_loss": 3.31562033416496, |
|
"tokens_seen": 3061186560 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"learning_rate": 7.788708297690334e-05, |
|
"loss": 2.533, |
|
"theoretical_loss": 3.315609463857742, |
|
"tokens_seen": 3061317632 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"learning_rate": 7.784431137724552e-05, |
|
"loss": 2.5029, |
|
"theoretical_loss": 3.3155985941462425, |
|
"tokens_seen": 3061448704 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"learning_rate": 7.780153977758768e-05, |
|
"loss": 2.478, |
|
"theoretical_loss": 3.315587725030404, |
|
"tokens_seen": 3061579776 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"learning_rate": 7.775876817792986e-05, |
|
"loss": 2.5078, |
|
"theoretical_loss": 3.315576856510168, |
|
"tokens_seen": 3061710848 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"learning_rate": 7.771599657827204e-05, |
|
"loss": 2.508, |
|
"theoretical_loss": 3.315565988585477, |
|
"tokens_seen": 3061841920 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"learning_rate": 7.76732249786142e-05, |
|
"loss": 2.5306, |
|
"theoretical_loss": 3.3155551212562724, |
|
"tokens_seen": 3061972992 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"learning_rate": 7.763045337895638e-05, |
|
"loss": 2.6, |
|
"theoretical_loss": 3.3155442545224956, |
|
"tokens_seen": 3062104064 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"learning_rate": 7.758768177929855e-05, |
|
"loss": 2.4638, |
|
"theoretical_loss": 3.315533388384089, |
|
"tokens_seen": 3062235136 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"learning_rate": 7.754491017964072e-05, |
|
"loss": 2.6104, |
|
"theoretical_loss": 3.315522522840995, |
|
"tokens_seen": 3062366208 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"objective/train/docs_used": 1678209, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.218628406524658, |
|
"objective/train/theoretical_loss": 3.3155116578931545, |
|
"objective/train/tokens_used": 92548576, |
|
"theoretical_loss": 3.3155116578931545, |
|
"tokens_seen": 3062497280 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"learning_rate": 7.75021385799829e-05, |
|
"loss": 2.2883, |
|
"theoretical_loss": 3.3155116578931545, |
|
"tokens_seen": 3062497280 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"learning_rate": 7.745936698032507e-05, |
|
"loss": 2.5558, |
|
"theoretical_loss": 3.31550079354051, |
|
"tokens_seen": 3062628352 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"learning_rate": 7.741659538066724e-05, |
|
"loss": 2.5281, |
|
"theoretical_loss": 3.315489929783004, |
|
"tokens_seen": 3062759424 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"learning_rate": 7.737382378100941e-05, |
|
"loss": 2.5825, |
|
"theoretical_loss": 3.315479066620577, |
|
"tokens_seen": 3062890496 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"learning_rate": 7.733105218135159e-05, |
|
"loss": 2.5783, |
|
"theoretical_loss": 3.3154682040531718, |
|
"tokens_seen": 3063021568 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"learning_rate": 7.728828058169377e-05, |
|
"loss": 2.4715, |
|
"theoretical_loss": 3.3154573420807303, |
|
"tokens_seen": 3063152640 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"learning_rate": 7.724550898203593e-05, |
|
"loss": 2.562, |
|
"theoretical_loss": 3.3154464807031943, |
|
"tokens_seen": 3063283712 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"learning_rate": 7.720273738237811e-05, |
|
"loss": 2.4961, |
|
"theoretical_loss": 3.315435619920506, |
|
"tokens_seen": 3063414784 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"learning_rate": 7.715996578272027e-05, |
|
"loss": 2.4626, |
|
"theoretical_loss": 3.315424759732607, |
|
"tokens_seen": 3063545856 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"learning_rate": 7.711719418306245e-05, |
|
"loss": 2.4789, |
|
"theoretical_loss": 3.31541390013944, |
|
"tokens_seen": 3063676928 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"learning_rate": 7.707442258340463e-05, |
|
"loss": 2.4289, |
|
"theoretical_loss": 3.3154030411409465, |
|
"tokens_seen": 3063808000 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"learning_rate": 7.70316509837468e-05, |
|
"loss": 2.5123, |
|
"theoretical_loss": 3.3153921827370683, |
|
"tokens_seen": 3063939072 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"learning_rate": 7.698887938408896e-05, |
|
"loss": 2.5002, |
|
"theoretical_loss": 3.3153813249277473, |
|
"tokens_seen": 3064070144 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"objective/train/docs_used": 1678783, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.781327724456787, |
|
"objective/train/theoretical_loss": 3.315375896246028, |
|
"objective/train/tokens_used": 94186976, |
|
"theoretical_loss": 3.315375896246028, |
|
"tokens_seen": 3064135680 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"learning_rate": 7.694610778443114e-05, |
|
"loss": 2.5217, |
|
"theoretical_loss": 3.3153704677129263, |
|
"tokens_seen": 3064201216 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"learning_rate": 7.690333618477332e-05, |
|
"loss": 2.4925, |
|
"theoretical_loss": 3.3153596110925467, |
|
"tokens_seen": 3064332288 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"learning_rate": 7.68605645851155e-05, |
|
"loss": 2.459, |
|
"theoretical_loss": 3.315348755066551, |
|
"tokens_seen": 3064463360 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"learning_rate": 7.681779298545766e-05, |
|
"loss": 2.4395, |
|
"theoretical_loss": 3.31533789963488, |
|
"tokens_seen": 3064594432 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"learning_rate": 7.677502138579982e-05, |
|
"loss": 2.4192, |
|
"theoretical_loss": 3.3153270447974776, |
|
"tokens_seen": 3064725504 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"learning_rate": 7.6732249786142e-05, |
|
"loss": 2.5295, |
|
"theoretical_loss": 3.3153161905542845, |
|
"tokens_seen": 3064856576 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"learning_rate": 7.668947818648418e-05, |
|
"loss": 2.4586, |
|
"theoretical_loss": 3.315305336905243, |
|
"tokens_seen": 3064987648 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"learning_rate": 7.664670658682636e-05, |
|
"loss": 2.4483, |
|
"theoretical_loss": 3.3152944838502956, |
|
"tokens_seen": 3065118720 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"learning_rate": 7.660393498716852e-05, |
|
"loss": 2.5967, |
|
"theoretical_loss": 3.3152836313893843, |
|
"tokens_seen": 3065249792 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"learning_rate": 7.656116338751069e-05, |
|
"loss": 2.6626, |
|
"theoretical_loss": 3.3152727795224504, |
|
"tokens_seen": 3065380864 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"learning_rate": 7.651839178785287e-05, |
|
"loss": 2.5101, |
|
"theoretical_loss": 3.3152619282494373, |
|
"tokens_seen": 3065511936 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"learning_rate": 7.647562018819505e-05, |
|
"loss": 2.3196, |
|
"theoretical_loss": 3.315251077570286, |
|
"tokens_seen": 3065643008 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"objective/train/docs_used": 1679875, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.6284544467926025, |
|
"objective/train/theoretical_loss": 3.3152402274849395, |
|
"objective/train/tokens_used": 95825376, |
|
"theoretical_loss": 3.3152402274849395, |
|
"tokens_seen": 3065774080 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"learning_rate": 7.643284858853722e-05, |
|
"loss": 2.5859, |
|
"theoretical_loss": 3.3152402274849395, |
|
"tokens_seen": 3065774080 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"learning_rate": 7.639007698887939e-05, |
|
"loss": 2.3573, |
|
"theoretical_loss": 3.315229377993339, |
|
"tokens_seen": 3065905152 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"learning_rate": 7.634730538922155e-05, |
|
"loss": 2.435, |
|
"theoretical_loss": 3.315218529095427, |
|
"tokens_seen": 3066036224 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"learning_rate": 7.630453378956373e-05, |
|
"loss": 2.4501, |
|
"theoretical_loss": 3.315207680791146, |
|
"tokens_seen": 3066167296 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"learning_rate": 7.626176218990591e-05, |
|
"loss": 2.484, |
|
"theoretical_loss": 3.3151968330804378, |
|
"tokens_seen": 3066298368 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"learning_rate": 7.621899059024807e-05, |
|
"loss": 2.4815, |
|
"theoretical_loss": 3.3151859859632444, |
|
"tokens_seen": 3066429440 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"learning_rate": 7.617621899059025e-05, |
|
"loss": 2.3643, |
|
"theoretical_loss": 3.3151751394395084, |
|
"tokens_seen": 3066560512 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"learning_rate": 7.613344739093242e-05, |
|
"loss": 2.6645, |
|
"theoretical_loss": 3.3151642935091714, |
|
"tokens_seen": 3066691584 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"learning_rate": 7.60906757912746e-05, |
|
"loss": 2.4228, |
|
"theoretical_loss": 3.3151534481721763, |
|
"tokens_seen": 3066822656 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"learning_rate": 7.604790419161677e-05, |
|
"loss": 2.5867, |
|
"theoretical_loss": 3.3151426034284643, |
|
"tokens_seen": 3066953728 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"learning_rate": 7.600513259195894e-05, |
|
"loss": 2.6365, |
|
"theoretical_loss": 3.3151317592779788, |
|
"tokens_seen": 3067084800 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"learning_rate": 7.596236099230112e-05, |
|
"loss": 2.5273, |
|
"theoretical_loss": 3.315120915720661, |
|
"tokens_seen": 3067215872 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"learning_rate": 7.591958939264328e-05, |
|
"loss": 2.5048, |
|
"theoretical_loss": 3.315110072756454, |
|
"tokens_seen": 3067346944 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"objective/train/docs_used": 1680552, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.5477449893951416, |
|
"objective/train/theoretical_loss": 3.3151046514967484, |
|
"objective/train/tokens_used": 97463776, |
|
"theoretical_loss": 3.3151046514967484, |
|
"tokens_seen": 3067412480 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"learning_rate": 7.587681779298546e-05, |
|
"loss": 2.3805, |
|
"theoretical_loss": 3.315099230385299, |
|
"tokens_seen": 3067478016 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"learning_rate": 7.583404619332764e-05, |
|
"loss": 2.4863, |
|
"theoretical_loss": 3.3150883886071387, |
|
"tokens_seen": 3067609088 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"learning_rate": 7.57912745936698e-05, |
|
"loss": 2.3692, |
|
"theoretical_loss": 3.3150775474219154, |
|
"tokens_seen": 3067740160 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"learning_rate": 7.574850299401198e-05, |
|
"loss": 2.5147, |
|
"theoretical_loss": 3.3150667068295716, |
|
"tokens_seen": 3067871232 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"learning_rate": 7.570573139435415e-05, |
|
"loss": 2.4548, |
|
"theoretical_loss": 3.3150558668300487, |
|
"tokens_seen": 3068002304 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"learning_rate": 7.566295979469632e-05, |
|
"loss": 2.515, |
|
"theoretical_loss": 3.31504502742329, |
|
"tokens_seen": 3068133376 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"learning_rate": 7.56201881950385e-05, |
|
"loss": 2.4336, |
|
"theoretical_loss": 3.3150341886092374, |
|
"tokens_seen": 3068264448 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"learning_rate": 7.557741659538067e-05, |
|
"loss": 2.4561, |
|
"theoretical_loss": 3.3150233503878326, |
|
"tokens_seen": 3068395520 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"learning_rate": 7.553464499572285e-05, |
|
"loss": 2.5634, |
|
"theoretical_loss": 3.3150125127590186, |
|
"tokens_seen": 3068526592 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"learning_rate": 7.549187339606501e-05, |
|
"loss": 2.5016, |
|
"theoretical_loss": 3.3150016757227374, |
|
"tokens_seen": 3068657664 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"learning_rate": 7.544910179640719e-05, |
|
"loss": 2.3541, |
|
"theoretical_loss": 3.314990839278931, |
|
"tokens_seen": 3068788736 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"learning_rate": 7.540633019674937e-05, |
|
"loss": 2.3958, |
|
"theoretical_loss": 3.314980003427542, |
|
"tokens_seen": 3068919808 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"objective/train/docs_used": 1681818, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.7954180240631104, |
|
"objective/train/theoretical_loss": 3.3149691681685134, |
|
"objective/train/tokens_used": 99102176, |
|
"theoretical_loss": 3.3149691681685134, |
|
"tokens_seen": 3069050880 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"learning_rate": 7.536355859709153e-05, |
|
"loss": 2.5015, |
|
"theoretical_loss": 3.3149691681685134, |
|
"tokens_seen": 3069050880 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"learning_rate": 7.532078699743371e-05, |
|
"loss": 2.5696, |
|
"theoretical_loss": 3.314958333501786, |
|
"tokens_seen": 3069181952 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"learning_rate": 7.527801539777588e-05, |
|
"loss": 2.3828, |
|
"theoretical_loss": 3.3149474994273036, |
|
"tokens_seen": 3069313024 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"learning_rate": 7.523524379811805e-05, |
|
"loss": 2.5782, |
|
"theoretical_loss": 3.314936665945008, |
|
"tokens_seen": 3069444096 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"learning_rate": 7.519247219846023e-05, |
|
"loss": 2.5156, |
|
"theoretical_loss": 3.314925833054841, |
|
"tokens_seen": 3069575168 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"learning_rate": 7.51497005988024e-05, |
|
"loss": 2.4908, |
|
"theoretical_loss": 3.3149150007567454, |
|
"tokens_seen": 3069706240 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"learning_rate": 7.510692899914457e-05, |
|
"loss": 2.5009, |
|
"theoretical_loss": 3.314904169050664, |
|
"tokens_seen": 3069837312 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"learning_rate": 7.506415739948674e-05, |
|
"loss": 2.4503, |
|
"theoretical_loss": 3.3148933379365384, |
|
"tokens_seen": 3069968384 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"learning_rate": 7.502138579982892e-05, |
|
"loss": 2.4305, |
|
"theoretical_loss": 3.3148825074143113, |
|
"tokens_seen": 3070099456 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"learning_rate": 7.49786142001711e-05, |
|
"loss": 2.3984, |
|
"theoretical_loss": 3.314871677483925, |
|
"tokens_seen": 3070230528 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"learning_rate": 7.493584260051326e-05, |
|
"loss": 2.4805, |
|
"theoretical_loss": 3.3148608481453223, |
|
"tokens_seen": 3070361600 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"learning_rate": 7.489307100085543e-05, |
|
"loss": 2.4796, |
|
"theoretical_loss": 3.3148500193984454, |
|
"tokens_seen": 3070492672 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"learning_rate": 7.48502994011976e-05, |
|
"loss": 2.5145, |
|
"theoretical_loss": 3.3148391912432364, |
|
"tokens_seen": 3070623744 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"objective/train/docs_used": 1682472, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.632871150970459, |
|
"objective/train/theoretical_loss": 3.3148337773874896, |
|
"objective/train/tokens_used": 100740576, |
|
"theoretical_loss": 3.3148337773874896, |
|
"tokens_seen": 3070689280 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"learning_rate": 7.480752780153978e-05, |
|
"loss": 2.4604, |
|
"theoretical_loss": 3.3148283636796383, |
|
"tokens_seen": 3070754816 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"learning_rate": 7.476475620188196e-05, |
|
"loss": 2.4658, |
|
"theoretical_loss": 3.3148175367075927, |
|
"tokens_seen": 3070885888 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"learning_rate": 7.472198460222413e-05, |
|
"loss": 2.4759, |
|
"theoretical_loss": 3.314806710327043, |
|
"tokens_seen": 3071016960 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"learning_rate": 7.467921300256629e-05, |
|
"loss": 2.6602, |
|
"theoretical_loss": 3.3147958845379306, |
|
"tokens_seen": 3071148032 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"learning_rate": 7.463644140290847e-05, |
|
"loss": 2.3896, |
|
"theoretical_loss": 3.3147850593401986, |
|
"tokens_seen": 3071279104 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"learning_rate": 7.459366980325065e-05, |
|
"loss": 2.587, |
|
"theoretical_loss": 3.3147742347337896, |
|
"tokens_seen": 3071410176 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"learning_rate": 7.455089820359282e-05, |
|
"loss": 2.719, |
|
"theoretical_loss": 3.3147634107186454, |
|
"tokens_seen": 3071541248 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"learning_rate": 7.450812660393499e-05, |
|
"loss": 2.5003, |
|
"theoretical_loss": 3.3147525872947092, |
|
"tokens_seen": 3071672320 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"learning_rate": 7.446535500427715e-05, |
|
"loss": 2.344, |
|
"theoretical_loss": 3.3147417644619233, |
|
"tokens_seen": 3071803392 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"learning_rate": 7.442258340461933e-05, |
|
"loss": 2.5274, |
|
"theoretical_loss": 3.31473094222023, |
|
"tokens_seen": 3071934464 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"learning_rate": 7.437981180496151e-05, |
|
"loss": 2.6219, |
|
"theoretical_loss": 3.3147201205695715, |
|
"tokens_seen": 3072065536 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"learning_rate": 7.433704020530369e-05, |
|
"loss": 2.4226, |
|
"theoretical_loss": 3.3147092995098912, |
|
"tokens_seen": 3072196608 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"objective/train/docs_used": 1683394, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.5833144187927246, |
|
"objective/train/theoretical_loss": 3.3146984790411307, |
|
"objective/train/tokens_used": 102378976, |
|
"theoretical_loss": 3.3146984790411307, |
|
"tokens_seen": 3072327680 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"learning_rate": 7.429426860564585e-05, |
|
"loss": 2.4399, |
|
"theoretical_loss": 3.3146984790411307, |
|
"tokens_seen": 3072327680 |
|
}, |
|
{ |
|
"epoch": 0.27, |
|
"learning_rate": 7.425149700598802e-05, |
|
"loss": 2.6166, |
|
"theoretical_loss": 3.314687659163233, |
|
"tokens_seen": 3072458752 |
|
}, |
|
{ |
|
"epoch": 0.27, |
|
"learning_rate": 7.420872540633021e-05, |
|
"loss": 2.2906, |
|
"theoretical_loss": 3.3146768398761406, |
|
"tokens_seen": 3072589824 |
|
}, |
|
{ |
|
"epoch": 0.27, |
|
"learning_rate": 7.416595380667238e-05, |
|
"loss": 2.5399, |
|
"theoretical_loss": 3.3146660211797956, |
|
"tokens_seen": 3072720896 |
|
}, |
|
{ |
|
"epoch": 0.27, |
|
"learning_rate": 7.412318220701454e-05, |
|
"loss": 2.4276, |
|
"theoretical_loss": 3.3146552030741416, |
|
"tokens_seen": 3072851968 |
|
}, |
|
{ |
|
"epoch": 0.27, |
|
"learning_rate": 7.408041060735672e-05, |
|
"loss": 2.5037, |
|
"theoretical_loss": 3.31464438555912, |
|
"tokens_seen": 3072983040 |
|
}, |
|
{ |
|
"epoch": 0.27, |
|
"learning_rate": 7.403763900769888e-05, |
|
"loss": 2.4357, |
|
"theoretical_loss": 3.314633568634674, |
|
"tokens_seen": 3073114112 |
|
}, |
|
{ |
|
"epoch": 0.27, |
|
"learning_rate": 7.399486740804107e-05, |
|
"loss": 2.5825, |
|
"theoretical_loss": 3.314622752300746, |
|
"tokens_seen": 3073245184 |
|
}, |
|
{ |
|
"epoch": 0.27, |
|
"learning_rate": 7.395209580838324e-05, |
|
"loss": 2.4643, |
|
"theoretical_loss": 3.3146119365572786, |
|
"tokens_seen": 3073376256 |
|
}, |
|
{ |
|
"epoch": 0.27, |
|
"learning_rate": 7.39093242087254e-05, |
|
"loss": 2.3604, |
|
"theoretical_loss": 3.3146011214042144, |
|
"tokens_seen": 3073507328 |
|
}, |
|
{ |
|
"epoch": 0.27, |
|
"learning_rate": 7.386655260906758e-05, |
|
"loss": 2.4892, |
|
"theoretical_loss": 3.314590306841496, |
|
"tokens_seen": 3073638400 |
|
}, |
|
{ |
|
"epoch": 0.27, |
|
"learning_rate": 7.382378100940975e-05, |
|
"loss": 2.6183, |
|
"theoretical_loss": 3.3145794928690657, |
|
"tokens_seen": 3073769472 |
|
}, |
|
{ |
|
"epoch": 0.27, |
|
"learning_rate": 7.378100940975194e-05, |
|
"loss": 2.5466, |
|
"theoretical_loss": 3.3145686794868667, |
|
"tokens_seen": 3073900544 |
|
}, |
|
{ |
|
"epoch": 0.27, |
|
"objective/train/docs_used": 1684169, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.35479998588562, |
|
"objective/train/theoretical_loss": 3.314563273017086, |
|
"objective/train/tokens_used": 104017376, |
|
"theoretical_loss": 3.314563273017086, |
|
"tokens_seen": 3073966080 |
|
}, |
|
{ |
|
"epoch": 0.27, |
|
"learning_rate": 7.37382378100941e-05, |
|
"loss": 2.5362, |
|
"theoretical_loss": 3.314557866694841, |
|
"tokens_seen": 3074031616 |
|
}, |
|
{ |
|
"epoch": 0.27, |
|
"learning_rate": 7.369546621043627e-05, |
|
"loss": 2.647, |
|
"theoretical_loss": 3.314547054492932, |
|
"tokens_seen": 3074162688 |
|
}, |
|
{ |
|
"epoch": 0.27, |
|
"learning_rate": 7.365269461077845e-05, |
|
"loss": 2.5716, |
|
"theoretical_loss": 3.314536242881082, |
|
"tokens_seen": 3074293760 |
|
}, |
|
{ |
|
"epoch": 0.27, |
|
"learning_rate": 7.360992301112061e-05, |
|
"loss": 2.6096, |
|
"theoretical_loss": 3.3145254318592325, |
|
"tokens_seen": 3074424832 |
|
}, |
|
{ |
|
"epoch": 0.27, |
|
"learning_rate": 7.356715141146279e-05, |
|
"loss": 2.4451, |
|
"theoretical_loss": 3.3145146214273282, |
|
"tokens_seen": 3074555904 |
|
}, |
|
{ |
|
"epoch": 0.27, |
|
"learning_rate": 7.352437981180497e-05, |
|
"loss": 2.4806, |
|
"theoretical_loss": 3.3145038115853103, |
|
"tokens_seen": 3074686976 |
|
}, |
|
{ |
|
"epoch": 0.27, |
|
"learning_rate": 7.348160821214713e-05, |
|
"loss": 2.5374, |
|
"theoretical_loss": 3.314493002333122, |
|
"tokens_seen": 3074818048 |
|
}, |
|
{ |
|
"epoch": 0.27, |
|
"learning_rate": 7.343883661248931e-05, |
|
"loss": 2.6111, |
|
"theoretical_loss": 3.3144821936707056, |
|
"tokens_seen": 3074949120 |
|
}, |
|
{ |
|
"epoch": 0.27, |
|
"learning_rate": 7.339606501283149e-05, |
|
"loss": 2.4581, |
|
"theoretical_loss": 3.3144713855980044, |
|
"tokens_seen": 3075080192 |
|
}, |
|
{ |
|
"epoch": 0.27, |
|
"learning_rate": 7.335329341317365e-05, |
|
"loss": 2.4606, |
|
"theoretical_loss": 3.3144605781149608, |
|
"tokens_seen": 3075211264 |
|
}, |
|
{ |
|
"epoch": 0.27, |
|
"learning_rate": 7.331052181351583e-05, |
|
"loss": 2.4645, |
|
"theoretical_loss": 3.314449771221517, |
|
"tokens_seen": 3075342336 |
|
}, |
|
{ |
|
"epoch": 0.27, |
|
"learning_rate": 7.3267750213858e-05, |
|
"loss": 2.5643, |
|
"theoretical_loss": 3.3144389649176165, |
|
"tokens_seen": 3075473408 |
|
}, |
|
{ |
|
"epoch": 0.27, |
|
"objective/train/docs_used": 1684867, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.6634860038757324, |
|
"objective/train/theoretical_loss": 3.3144281592032017, |
|
"objective/train/tokens_used": 105655776, |
|
"theoretical_loss": 3.3144281592032017, |
|
"tokens_seen": 3075604480 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"learning_rate": 7.322497861420018e-05, |
|
"loss": 2.6146, |
|
"theoretical_loss": 3.3144281592032017, |
|
"tokens_seen": 3075604480 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"learning_rate": 7.318220701454235e-05, |
|
"loss": 2.4308, |
|
"theoretical_loss": 3.3144173540782154, |
|
"tokens_seen": 3075735552 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"learning_rate": 7.313943541488452e-05, |
|
"loss": 2.4509, |
|
"theoretical_loss": 3.3144065495426, |
|
"tokens_seen": 3075866624 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"learning_rate": 7.30966638152267e-05, |
|
"loss": 2.4845, |
|
"theoretical_loss": 3.3143957455962982, |
|
"tokens_seen": 3075997696 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"learning_rate": 7.305389221556886e-05, |
|
"loss": 2.6057, |
|
"theoretical_loss": 3.3143849422392533, |
|
"tokens_seen": 3076128768 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"learning_rate": 7.301112061591104e-05, |
|
"loss": 2.4856, |
|
"theoretical_loss": 3.3143741394714077, |
|
"tokens_seen": 3076259840 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"learning_rate": 7.296834901625322e-05, |
|
"loss": 2.6096, |
|
"theoretical_loss": 3.314363337292704, |
|
"tokens_seen": 3076390912 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"learning_rate": 7.292557741659538e-05, |
|
"loss": 2.5767, |
|
"theoretical_loss": 3.314352535703086, |
|
"tokens_seen": 3076521984 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"learning_rate": 7.288280581693756e-05, |
|
"loss": 2.6051, |
|
"theoretical_loss": 3.314341734702495, |
|
"tokens_seen": 3076653056 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"learning_rate": 7.284003421727973e-05, |
|
"loss": 2.472, |
|
"theoretical_loss": 3.3143309342908744, |
|
"tokens_seen": 3076784128 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"learning_rate": 7.279726261762189e-05, |
|
"loss": 2.5424, |
|
"theoretical_loss": 3.314320134468167, |
|
"tokens_seen": 3076915200 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"learning_rate": 7.275449101796408e-05, |
|
"loss": 2.6118, |
|
"theoretical_loss": 3.3143093352343165, |
|
"tokens_seen": 3077046272 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"learning_rate": 7.271171941830625e-05, |
|
"loss": 2.4979, |
|
"theoretical_loss": 3.314298536589264, |
|
"tokens_seen": 3077177344 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"objective/train/docs_used": 1686119, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.6551506519317627, |
|
"objective/train/theoretical_loss": 3.3142931374875197, |
|
"objective/train/tokens_used": 107294176, |
|
"theoretical_loss": 3.3142931374875197, |
|
"tokens_seen": 3077242880 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"learning_rate": 7.266894781864843e-05, |
|
"loss": 2.5443, |
|
"theoretical_loss": 3.3142877385329537, |
|
"tokens_seen": 3077308416 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"learning_rate": 7.262617621899059e-05, |
|
"loss": 2.5527, |
|
"theoretical_loss": 3.314276941065328, |
|
"tokens_seen": 3077439488 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"learning_rate": 7.258340461933276e-05, |
|
"loss": 2.6246, |
|
"theoretical_loss": 3.3142661441863295, |
|
"tokens_seen": 3077570560 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"learning_rate": 7.254063301967495e-05, |
|
"loss": 2.5297, |
|
"theoretical_loss": 3.3142553478959007, |
|
"tokens_seen": 3077701632 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"learning_rate": 7.249786142001711e-05, |
|
"loss": 2.4888, |
|
"theoretical_loss": 3.3142445521939856, |
|
"tokens_seen": 3077832704 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"learning_rate": 7.245508982035929e-05, |
|
"loss": 2.5649, |
|
"theoretical_loss": 3.314233757080526, |
|
"tokens_seen": 3077963776 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"learning_rate": 7.241231822070146e-05, |
|
"loss": 2.5155, |
|
"theoretical_loss": 3.3142229625554656, |
|
"tokens_seen": 3078094848 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"learning_rate": 7.236954662104363e-05, |
|
"loss": 2.5393, |
|
"theoretical_loss": 3.3142121686187465, |
|
"tokens_seen": 3078225920 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"learning_rate": 7.232677502138581e-05, |
|
"loss": 2.5875, |
|
"theoretical_loss": 3.3142013752703123, |
|
"tokens_seen": 3078356992 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"learning_rate": 7.228400342172798e-05, |
|
"loss": 2.4805, |
|
"theoretical_loss": 3.314190582510105, |
|
"tokens_seen": 3078488064 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"learning_rate": 7.224123182207014e-05, |
|
"loss": 2.5378, |
|
"theoretical_loss": 3.3141797903380685, |
|
"tokens_seen": 3078619136 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"learning_rate": 7.219846022241232e-05, |
|
"loss": 2.5374, |
|
"theoretical_loss": 3.314168998754145, |
|
"tokens_seen": 3078750208 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"objective/train/docs_used": 1686853, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.4723048210144043, |
|
"objective/train/theoretical_loss": 3.314158207758278, |
|
"objective/train/tokens_used": 108932576, |
|
"theoretical_loss": 3.314158207758278, |
|
"tokens_seen": 3078881280 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"learning_rate": 7.21556886227545e-05, |
|
"loss": 2.7317, |
|
"theoretical_loss": 3.314158207758278, |
|
"tokens_seen": 3078881280 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"learning_rate": 7.211291702309668e-05, |
|
"loss": 2.5378, |
|
"theoretical_loss": 3.3141474173504095, |
|
"tokens_seen": 3079012352 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"learning_rate": 7.207014542343884e-05, |
|
"loss": 2.6327, |
|
"theoretical_loss": 3.3141366275304835, |
|
"tokens_seen": 3079143424 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"learning_rate": 7.2027373823781e-05, |
|
"loss": 2.5976, |
|
"theoretical_loss": 3.3141258382984424, |
|
"tokens_seen": 3079274496 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"learning_rate": 7.198460222412318e-05, |
|
"loss": 2.6818, |
|
"theoretical_loss": 3.3141150496542293, |
|
"tokens_seen": 3079405568 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"learning_rate": 7.194183062446536e-05, |
|
"loss": 2.6468, |
|
"theoretical_loss": 3.3141042615977865, |
|
"tokens_seen": 3079536640 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"learning_rate": 7.189905902480754e-05, |
|
"loss": 2.4813, |
|
"theoretical_loss": 3.314093474129058, |
|
"tokens_seen": 3079667712 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"learning_rate": 7.18562874251497e-05, |
|
"loss": 2.5804, |
|
"theoretical_loss": 3.314082687247986, |
|
"tokens_seen": 3079798784 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"learning_rate": 7.181351582549187e-05, |
|
"loss": 2.4319, |
|
"theoretical_loss": 3.314071900954514, |
|
"tokens_seen": 3079929856 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"learning_rate": 7.177074422583405e-05, |
|
"loss": 2.5641, |
|
"theoretical_loss": 3.3140611152485846, |
|
"tokens_seen": 3080060928 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"learning_rate": 7.172797262617623e-05, |
|
"loss": 2.609, |
|
"theoretical_loss": 3.314050330130141, |
|
"tokens_seen": 3080192000 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"learning_rate": 7.16852010265184e-05, |
|
"loss": 2.4717, |
|
"theoretical_loss": 3.314039545599126, |
|
"tokens_seen": 3080323072 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"learning_rate": 7.164242942686057e-05, |
|
"loss": 2.5039, |
|
"theoretical_loss": 3.314028761655483, |
|
"tokens_seen": 3080454144 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"objective/train/docs_used": 1688263, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.41924786567688, |
|
"objective/train/theoretical_loss": 3.314023369903908, |
|
"objective/train/tokens_used": 110570976, |
|
"theoretical_loss": 3.314023369903908, |
|
"tokens_seen": 3080519680 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"learning_rate": 7.159965782720273e-05, |
|
"loss": 2.4467, |
|
"theoretical_loss": 3.3140179782991552, |
|
"tokens_seen": 3080585216 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"learning_rate": 7.155688622754491e-05, |
|
"loss": 2.6404, |
|
"theoretical_loss": 3.3140071955300847, |
|
"tokens_seen": 3080716288 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"learning_rate": 7.151411462788709e-05, |
|
"loss": 2.5012, |
|
"theoretical_loss": 3.3139964133482147, |
|
"tokens_seen": 3080847360 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"learning_rate": 7.147134302822926e-05, |
|
"loss": 2.469, |
|
"theoretical_loss": 3.3139856317534893, |
|
"tokens_seen": 3080978432 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"learning_rate": 7.142857142857143e-05, |
|
"loss": 2.5803, |
|
"theoretical_loss": 3.31397485074585, |
|
"tokens_seen": 3081109504 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"learning_rate": 7.13857998289136e-05, |
|
"loss": 2.4955, |
|
"theoretical_loss": 3.3139640703252415, |
|
"tokens_seen": 3081240576 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"learning_rate": 7.134302822925578e-05, |
|
"loss": 2.5004, |
|
"theoretical_loss": 3.3139532904916056, |
|
"tokens_seen": 3081371648 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"learning_rate": 7.130025662959796e-05, |
|
"loss": 2.5855, |
|
"theoretical_loss": 3.3139425112448864, |
|
"tokens_seen": 3081502720 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"learning_rate": 7.125748502994012e-05, |
|
"loss": 2.543, |
|
"theoretical_loss": 3.3139317325850257, |
|
"tokens_seen": 3081633792 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"learning_rate": 7.12147134302823e-05, |
|
"loss": 2.4956, |
|
"theoretical_loss": 3.3139209545119677, |
|
"tokens_seen": 3081764864 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"learning_rate": 7.117194183062446e-05, |
|
"loss": 2.3803, |
|
"theoretical_loss": 3.313910177025655, |
|
"tokens_seen": 3081895936 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"learning_rate": 7.112917023096664e-05, |
|
"loss": 2.4136, |
|
"theoretical_loss": 3.3138994001260307, |
|
"tokens_seen": 3082027008 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"objective/train/docs_used": 1688735, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.326685667037964, |
|
"objective/train/theoretical_loss": 3.313888623813038, |
|
"objective/train/tokens_used": 112209376, |
|
"theoretical_loss": 3.313888623813038, |
|
"tokens_seen": 3082158080 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"learning_rate": 7.108639863130882e-05, |
|
"loss": 2.4642, |
|
"theoretical_loss": 3.313888623813038, |
|
"tokens_seen": 3082158080 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"learning_rate": 7.104362703165098e-05, |
|
"loss": 2.5382, |
|
"theoretical_loss": 3.3138778480866202, |
|
"tokens_seen": 3082289152 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"learning_rate": 7.100085543199316e-05, |
|
"loss": 2.527, |
|
"theoretical_loss": 3.3138670729467203, |
|
"tokens_seen": 3082420224 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"learning_rate": 7.095808383233533e-05, |
|
"loss": 2.5672, |
|
"theoretical_loss": 3.3138562983932816, |
|
"tokens_seen": 3082551296 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"learning_rate": 7.09153122326775e-05, |
|
"loss": 2.4753, |
|
"theoretical_loss": 3.313845524426247, |
|
"tokens_seen": 3082682368 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"learning_rate": 7.087254063301968e-05, |
|
"loss": 2.6285, |
|
"theoretical_loss": 3.3138347510455595, |
|
"tokens_seen": 3082813440 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"learning_rate": 7.082976903336185e-05, |
|
"loss": 2.4997, |
|
"theoretical_loss": 3.3138239782511625, |
|
"tokens_seen": 3082944512 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"learning_rate": 7.078699743370403e-05, |
|
"loss": 2.6724, |
|
"theoretical_loss": 3.3138132060429992, |
|
"tokens_seen": 3083075584 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"learning_rate": 7.074422583404619e-05, |
|
"loss": 2.5294, |
|
"theoretical_loss": 3.313802434421013, |
|
"tokens_seen": 3083206656 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"learning_rate": 7.070145423438837e-05, |
|
"loss": 2.442, |
|
"theoretical_loss": 3.313791663385146, |
|
"tokens_seen": 3083337728 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"learning_rate": 7.065868263473055e-05, |
|
"loss": 2.5894, |
|
"theoretical_loss": 3.313780892935343, |
|
"tokens_seen": 3083468800 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"learning_rate": 7.061591103507271e-05, |
|
"loss": 2.5735, |
|
"theoretical_loss": 3.313770123071546, |
|
"tokens_seen": 3083599872 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"learning_rate": 7.057313943541489e-05, |
|
"loss": 2.5645, |
|
"theoretical_loss": 3.313759353793699, |
|
"tokens_seen": 3083730944 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"objective/train/docs_used": 1689720, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.1338090896606445, |
|
"objective/train/theoretical_loss": 3.313753969374489, |
|
"objective/train/tokens_used": 113847776, |
|
"theoretical_loss": 3.313753969374489, |
|
"tokens_seen": 3083796480 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"learning_rate": 7.053036783575706e-05, |
|
"loss": 2.4857, |
|
"theoretical_loss": 3.3137485851017447, |
|
"tokens_seen": 3083862016 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"learning_rate": 7.048759623609923e-05, |
|
"loss": 2.6054, |
|
"theoretical_loss": 3.3137378169956264, |
|
"tokens_seen": 3083993088 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"learning_rate": 7.044482463644141e-05, |
|
"loss": 2.6083, |
|
"theoretical_loss": 3.313727049475287, |
|
"tokens_seen": 3084124160 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"learning_rate": 7.040205303678358e-05, |
|
"loss": 2.3527, |
|
"theoretical_loss": 3.3137162825406707, |
|
"tokens_seen": 3084255232 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"learning_rate": 7.035928143712576e-05, |
|
"loss": 2.4356, |
|
"theoretical_loss": 3.31370551619172, |
|
"tokens_seen": 3084386304 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"learning_rate": 7.031650983746792e-05, |
|
"loss": 2.6893, |
|
"theoretical_loss": 3.313694750428378, |
|
"tokens_seen": 3084517376 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"learning_rate": 7.02737382378101e-05, |
|
"loss": 2.3975, |
|
"theoretical_loss": 3.313683985250589, |
|
"tokens_seen": 3084648448 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"learning_rate": 7.023096663815228e-05, |
|
"loss": 2.5608, |
|
"theoretical_loss": 3.313673220658295, |
|
"tokens_seen": 3084779520 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"learning_rate": 7.018819503849444e-05, |
|
"loss": 2.4643, |
|
"theoretical_loss": 3.31366245665144, |
|
"tokens_seen": 3084910592 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"learning_rate": 7.01454234388366e-05, |
|
"loss": 2.395, |
|
"theoretical_loss": 3.3136516932299673, |
|
"tokens_seen": 3085041664 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"learning_rate": 7.010265183917878e-05, |
|
"loss": 2.5804, |
|
"theoretical_loss": 3.3136409303938197, |
|
"tokens_seen": 3085172736 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"learning_rate": 7.005988023952096e-05, |
|
"loss": 2.5382, |
|
"theoretical_loss": 3.313630168142941, |
|
"tokens_seen": 3085303808 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"objective/train/docs_used": 1690994, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.813086748123169, |
|
"objective/train/theoretical_loss": 3.3136194064772746, |
|
"objective/train/tokens_used": 115486176, |
|
"theoretical_loss": 3.3136194064772746, |
|
"tokens_seen": 3085434880 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"learning_rate": 7.001710863986314e-05, |
|
"loss": 2.7161, |
|
"theoretical_loss": 3.3136194064772746, |
|
"tokens_seen": 3085434880 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"learning_rate": 6.99743370402053e-05, |
|
"loss": 2.5303, |
|
"theoretical_loss": 3.3136086453967635, |
|
"tokens_seen": 3085565952 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"learning_rate": 6.993156544054747e-05, |
|
"loss": 2.5129, |
|
"theoretical_loss": 3.313597884901351, |
|
"tokens_seen": 3085697024 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"learning_rate": 6.988879384088965e-05, |
|
"loss": 2.4289, |
|
"theoretical_loss": 3.3135871249909803, |
|
"tokens_seen": 3085828096 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"learning_rate": 6.984602224123183e-05, |
|
"loss": 2.4337, |
|
"theoretical_loss": 3.3135763656655954, |
|
"tokens_seen": 3085959168 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"learning_rate": 6.9803250641574e-05, |
|
"loss": 2.4339, |
|
"theoretical_loss": 3.313565606925139, |
|
"tokens_seen": 3086090240 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"learning_rate": 6.976047904191617e-05, |
|
"loss": 2.3467, |
|
"theoretical_loss": 3.313554848769555, |
|
"tokens_seen": 3086221312 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"learning_rate": 6.971770744225834e-05, |
|
"loss": 2.5069, |
|
"theoretical_loss": 3.313544091198786, |
|
"tokens_seen": 3086352384 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"learning_rate": 6.967493584260051e-05, |
|
"loss": 2.3418, |
|
"theoretical_loss": 3.313533334212776, |
|
"tokens_seen": 3086483456 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"learning_rate": 6.963216424294269e-05, |
|
"loss": 2.6456, |
|
"theoretical_loss": 3.3135225778114683, |
|
"tokens_seen": 3086614528 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"learning_rate": 6.958939264328487e-05, |
|
"loss": 2.4547, |
|
"theoretical_loss": 3.313511821994806, |
|
"tokens_seen": 3086745600 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"learning_rate": 6.954662104362704e-05, |
|
"loss": 2.459, |
|
"theoretical_loss": 3.313501066762733, |
|
"tokens_seen": 3086876672 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"learning_rate": 6.95038494439692e-05, |
|
"loss": 2.5895, |
|
"theoretical_loss": 3.3134903121151926, |
|
"tokens_seen": 3087007744 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"objective/train/docs_used": 1691628, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.380619525909424, |
|
"objective/train/theoretical_loss": 3.313484935010604, |
|
"objective/train/tokens_used": 117124576, |
|
"theoretical_loss": 3.313484935010604, |
|
"tokens_seen": 3087073280 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"learning_rate": 6.946107784431138e-05, |
|
"loss": 2.5606, |
|
"theoretical_loss": 3.3134795580521277, |
|
"tokens_seen": 3087138816 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"learning_rate": 6.941830624465356e-05, |
|
"loss": 2.6642, |
|
"theoretical_loss": 3.313468804573482, |
|
"tokens_seen": 3087269888 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"learning_rate": 6.937553464499572e-05, |
|
"loss": 2.3761, |
|
"theoretical_loss": 3.313458051679199, |
|
"tokens_seen": 3087400960 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"learning_rate": 6.93327630453379e-05, |
|
"loss": 2.6433, |
|
"theoretical_loss": 3.3134472993692223, |
|
"tokens_seen": 3087532032 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"learning_rate": 6.928999144568006e-05, |
|
"loss": 2.5497, |
|
"theoretical_loss": 3.3134365476434953, |
|
"tokens_seen": 3087663104 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"learning_rate": 6.924721984602224e-05, |
|
"loss": 2.5376, |
|
"theoretical_loss": 3.313425796501961, |
|
"tokens_seen": 3087794176 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"learning_rate": 6.920444824636442e-05, |
|
"loss": 2.6459, |
|
"theoretical_loss": 3.3134150459445633, |
|
"tokens_seen": 3087925248 |
|
}, |
|
{ |
|
"epoch": 0.32, |
|
"learning_rate": 6.916167664670659e-05, |
|
"loss": 2.7084, |
|
"theoretical_loss": 3.313404295971245, |
|
"tokens_seen": 3088056320 |
|
}, |
|
{ |
|
"epoch": 0.32, |
|
"learning_rate": 6.911890504704876e-05, |
|
"loss": 2.5354, |
|
"theoretical_loss": 3.313393546581951, |
|
"tokens_seen": 3088187392 |
|
}, |
|
{ |
|
"epoch": 0.32, |
|
"learning_rate": 6.907613344739093e-05, |
|
"loss": 2.4561, |
|
"theoretical_loss": 3.3133827977766237, |
|
"tokens_seen": 3088318464 |
|
}, |
|
{ |
|
"epoch": 0.32, |
|
"learning_rate": 6.903336184773311e-05, |
|
"loss": 2.6013, |
|
"theoretical_loss": 3.3133720495552064, |
|
"tokens_seen": 3088449536 |
|
}, |
|
{ |
|
"epoch": 0.32, |
|
"learning_rate": 6.899059024807529e-05, |
|
"loss": 2.5218, |
|
"theoretical_loss": 3.3133613019176433, |
|
"tokens_seen": 3088580608 |
|
}, |
|
{ |
|
"epoch": 0.32, |
|
"objective/train/docs_used": 1692791, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.3386290073394775, |
|
"objective/train/theoretical_loss": 3.3133505548638778, |
|
"objective/train/tokens_used": 118762976, |
|
"theoretical_loss": 3.3133505548638778, |
|
"tokens_seen": 3088711680 |
|
}, |
|
{ |
|
"epoch": 0.32, |
|
"learning_rate": 6.894781864841745e-05, |
|
"loss": 2.5461, |
|
"theoretical_loss": 3.3133505548638778, |
|
"tokens_seen": 3088711680 |
|
}, |
|
{ |
|
"epoch": 0.32, |
|
"learning_rate": 6.890504704875963e-05, |
|
"loss": 2.6487, |
|
"theoretical_loss": 3.3133398083938532, |
|
"tokens_seen": 3088842752 |
|
}, |
|
{ |
|
"epoch": 0.32, |
|
"learning_rate": 6.886227544910179e-05, |
|
"loss": 2.6339, |
|
"theoretical_loss": 3.313329062507513, |
|
"tokens_seen": 3088973824 |
|
}, |
|
{ |
|
"epoch": 0.32, |
|
"learning_rate": 6.881950384944397e-05, |
|
"loss": 2.525, |
|
"theoretical_loss": 3.313318317204801, |
|
"tokens_seen": 3089104896 |
|
} |
|
], |
|
"max_steps": 2362, |
|
"num_train_epochs": 9223372036854775807, |
|
"total_flos": 5.0368742424576e+16, |
|
"trial_name": null, |
|
"trial_params": null |
|
} |